//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
                cl::desc("Enable / disable ARM interworking (for debugging only)"),
                cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

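// Configure how operations on the NEON vector type VT are legalized. When VT
// differs from PromotedLdStVT, its loads and stores are Promoted: legalization
// rewrites them through the promoted type, so e.g. a v8i8 load becomes an f64
// load followed by a bitcast back to v8i8. Bitwise operations are likewise
// performed in PromotedBitwiseVT via bitcasts.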
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
}

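// D-register types: 64-bit NEON vectors live in the DPR register class, with
// loads/stores handled as f64 and bitwise operations performed as v2i32.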
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

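// Q-register types: 128-bit NEON vectors are modeled as pairs of consecutive
// D registers (DPair), with loads/stores as v2f64 and bitwise ops as v4i32.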
void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

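  // Scalar booleans are represented as 0/1, while vector booleans are
  // all-zeros/all-ones lane masks, matching NEON comparison results.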
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? CallingConv::ARM_AAPCS_VFP
                                       : CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
        { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },
        { RTLIB::O_F32, "__unordsf2vfp", ISD::SETEQ },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
        { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },
        { RTLIB::O_F64, "__unorddf2vfp", ISD::SETEQ },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // RTLIB
  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

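  // Windows on ARM supplies its own 64-bit integer <-> floating-point
  // conversion helpers, which use the VFP (hard-float) calling convention.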
  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetMachO() &&
      !(Subtarget->isTargetIOS() &&
        Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-watchos platforms, but are needed for some targets which use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);

  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&
      !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);
  }

  if (Subtarget->hasFullFP16()) {
    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
  }

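  // Start out by expanding every extending load and truncating store between
  // vector types, plus the multiply-high variants; specific combinations are
  // marked legal again below once the subtarget's features are known.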
  for (MVT VT : MVT::vector_valuetypes()) {
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    // The same applies to v4f32, though keep in mind that vadd, vsub and vmul
    // are natively supported for v4f32.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a case where "copysign" appears in the DAG with
    // vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);

    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have a single-instruction SINT_TO_FP or UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have a
    // FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have a single-instruction CTPOP for vectors with element
    // types wider than 8 bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have a single-instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

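    // DAG combines registered for NEON; the corresponding handlers are
    // dispatched from ARMTargetLowering::PerformDAGCombine.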
    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::LOAD);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  if (!Subtarget->hasFP64()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions which
    // are present. However, no double-precision operations other than moves,
    // loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD, MVT::f64, Expand);
    setOperationAction(ISD::FSUB, MVT::f64, Expand);
    setOperationAction(ISD::FMUL, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FDIV, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FPOW, MVT::f64, Expand);
    setOperationAction(ISD::FLOG, MVT::f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::f64, Expand);
    setOperationAction(ISD::FEXP, MVT::f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
    setOperationAction(ISD::FRINT, MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // ... or truncating stores
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // ARM does not have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  } else {
    // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
    setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  }

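  // The overflow and carry operations below are custom lowered so they can
  // make direct use of the CPSR flags set by ADDS/SUBS and consumed by
  // ADC/SBC.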
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);

  // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  }

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
    setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall);
  }

  // @llvm.readcyclecounter requires the Performance Monitors extension.
  // Default to the 0 expansion on unsupported platforms.
  // FIXME: Technically there are older ARM CPUs that have
  // implementation-specific ways of obtaining this information.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (!hasDivide) {
    // These are expanded into libcalls if the CPU doesn't have a hardware
    // divider.
    setOperationAction(ISD::SDIV, MVT::i32, LibCall);
    setOperationAction(ISD::UDIV, MVT::i32, LibCall);
  }

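  // On Windows, division is instead custom lowered so it can call the
  // __rt_sdiv / __rt_udiv helpers registered below, which hand back the
  // quotient and remainder in registers.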
  if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);

    setOperationAction(ISD::SDIV, MVT::i64, Custom);
    setOperationAction(ISD::UDIV, MVT::i64, Custom);
  }

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  // Register based DivRem for AEABI (RTABI 4.2)
  if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
      Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
      Subtarget->isTargetWindows()) {
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
    HasStandaloneRem = false;

    if (Subtarget->isTargetWindows()) {
      const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
      }
    } else {
      const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
      }
    }

    setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  }

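  // MSVCRT does not provide the __powi* libcalls that the default FPOWI
  // expansion would use, so FPOWI needs custom handling on that target.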
  if (Subtarget->isTargetWindows() && Subtarget->getTargetTriple().isOSMSVCRT())
    for (auto &VT : {MVT::f32, MVT::f64})
      setOperationAction(ISD::FPOWI, VT, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  InsertFencesForAtomic = false;
  if (Subtarget->hasAnyDataBarrier() &&
      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
    // ATOMIC_FENCE needs custom lowering; the others should have been expanded
    // to ldrex/strex loops already.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    if (!Subtarget->isThumb() || !Subtarget->isMClass())
      setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

    // On v8, we have particularly efficient implementations of atomic fences
    // if they can be combined with nearby atomic loads and stores.
    if (!Subtarget->hasAcquireRelease() ||
        getTargetMachine().getOptLevel() == 0) {
      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
      InsertFencesForAtomic = true;
    }
  } else {
    // If there's anything we can use as a barrier, go through custom lowering
    // for ATOMIC_FENCE.
    // If the target has DMB in Thumb mode, fences can be inserted.
    if (Subtarget->hasDataBarrier())
      InsertFencesForAtomic = true;

    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other,
                       Subtarget->hasAnyDataBarrier() ? Custom : Expand);

    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    if (!InsertFencesForAtomic) {
      setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
      setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    }
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD and i64->f64 into VMOVDRR, iff the target
    // supports VFP2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (Subtarget->useSjLjEH())
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

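  // Comparisons produce flags rather than boolean registers here: SETCC is
  // expanded, while SELECT and SELECT_CC are custom lowered so they can become
  // predicated moves (ARMISD::CMOV) driven by the CPSR flags.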
  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  if (Subtarget->hasFullFP16()) {
    setOperationAction(ISD::SETCC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
  }

| 1063 | |
| 1064 | setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom); |
| 1065 | |
| 1066 | setOperationAction(ISD::BRCOND, MVT::Other, Custom); |
| 1067 | setOperationAction(ISD::BR_CC, MVT::i32, Custom); |
| 1068 | if (Subtarget->hasFullFP16()) |
| 1069 | setOperationAction(ISD::BR_CC, MVT::f16, Custom); |
| 1070 | setOperationAction(ISD::BR_CC, MVT::f32, Custom); |
| 1071 | setOperationAction(ISD::BR_CC, MVT::f64, Custom); |
| 1072 | setOperationAction(ISD::BR_JT, MVT::Other, Custom); |
| 1073 | |
| 1074 | // We don't support sin/cos/fmod/copysign/pow |
| 1075 | setOperationAction(ISD::FSIN, MVT::f64, Expand); |
| 1076 | setOperationAction(ISD::FSIN, MVT::f32, Expand); |
| 1077 | setOperationAction(ISD::FCOS, MVT::f32, Expand); |
| 1078 | setOperationAction(ISD::FCOS, MVT::f64, Expand); |
| 1079 | setOperationAction(ISD::FSINCOS, MVT::f64, Expand); |
| 1080 | setOperationAction(ISD::FSINCOS, MVT::f32, Expand); |
| 1081 | setOperationAction(ISD::FREM, MVT::f64, Expand); |
| 1082 | setOperationAction(ISD::FREM, MVT::f32, Expand); |
| 1083 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && |
| 1084 | !Subtarget->isThumb1Only()) { |
| 1085 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); |
| 1086 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); |
| 1087 | } |
| 1088 | setOperationAction(ISD::FPOW, MVT::f64, Expand); |
| 1089 | setOperationAction(ISD::FPOW, MVT::f32, Expand); |
| 1090 | |
| 1091 | if (!Subtarget->hasVFP4Base()) { |
| 1092 | setOperationAction(ISD::FMA, MVT::f64, Expand); |
| 1093 | setOperationAction(ISD::FMA, MVT::f32, Expand); |
| 1094 | } |
| 1095 | |
| 1096 | // Various VFP goodness |
| 1097 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { |
| 1098 | // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. |
| 1099 | if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { |
| 1100 | setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); |
| 1101 | setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); |
| 1102 | } |
| 1103 | |
| 1104 | // fp16 is a special v7 extension that adds f16 <-> f32 conversions. |
| 1105 | if (!Subtarget->hasFP16()) { |
| 1106 | setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); |
| 1107 | setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); |
| 1108 | } |
| 1109 | } |
| 1110 | |
| 1111 | // Use __sincos_stret if available. |
| 1112 | if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && |
| 1113 | getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { |
| 1114 | setOperationAction(ISD::FSINCOS, MVT::f64, Custom); |
| 1115 | setOperationAction(ISD::FSINCOS, MVT::f32, Custom); |
| 1116 | } |
| 1117 | |
| 1118 | // FP-ARMv8 implements a lot of rounding-like FP operations. |
| 1119 | if (Subtarget->hasFPARMv8Base()) { |
| 1120 | setOperationAction(ISD::FFLOOR, MVT::f32, Legal); |
| 1121 | setOperationAction(ISD::FCEIL, MVT::f32, Legal); |
| 1122 | setOperationAction(ISD::FROUND, MVT::f32, Legal); |
| 1123 | setOperationAction(ISD::FTRUNC, MVT::f32, Legal); |
| 1124 | setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); |
| 1125 | setOperationAction(ISD::FRINT, MVT::f32, Legal); |
| 1126 | setOperationAction(ISD::FMINNUM, MVT::f32, Legal); |
| 1127 | setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); |
| 1128 | setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal); |
| 1129 | setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal); |
| 1130 | setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); |
| 1131 | setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); |
| 1132 | |
| 1133 | if (Subtarget->hasFP64()) { |
| 1134 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); |
| 1135 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); |
| 1136 | setOperationAction(ISD::FROUND, MVT::f64, Legal); |
| 1137 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); |
| 1138 | setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); |
| 1139 | setOperationAction(ISD::FRINT, MVT::f64, Legal); |
| 1140 | setOperationAction(ISD::FMINNUM, MVT::f64, Legal); |
| 1141 | setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); |
| 1142 | } |
| 1143 | } |
| 1144 | |
// FP16 operations often need to be promoted to call library functions.
| 1146 | if (Subtarget->hasFullFP16()) { |
| 1147 | setOperationAction(ISD::FREM, MVT::f16, Promote); |
| 1148 | setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand); |
| 1149 | setOperationAction(ISD::FSIN, MVT::f16, Promote); |
| 1150 | setOperationAction(ISD::FCOS, MVT::f16, Promote); |
| 1151 | setOperationAction(ISD::FSINCOS, MVT::f16, Promote); |
| 1152 | setOperationAction(ISD::FPOWI, MVT::f16, Promote); |
| 1153 | setOperationAction(ISD::FPOW, MVT::f16, Promote); |
| 1154 | setOperationAction(ISD::FEXP, MVT::f16, Promote); |
| 1155 | setOperationAction(ISD::FEXP2, MVT::f16, Promote); |
| 1156 | setOperationAction(ISD::FLOG, MVT::f16, Promote); |
| 1157 | setOperationAction(ISD::FLOG10, MVT::f16, Promote); |
| 1158 | setOperationAction(ISD::FLOG2, MVT::f16, Promote); |
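// Promote widens the f16 operand to f32, calls the f32 libcall (e.g. fmodf
// for FREM), and truncates the result back to f16.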
| 1159 | |
| 1160 | setOperationAction(ISD::FROUND, MVT::f16, Legal); |
| 1161 | } |
| 1162 | |
| 1163 | if (Subtarget->hasNEON()) { |
| 1164 | // vmin and vmax aren't available in a scalar form, so we use |
| 1165 | // a NEON instruction with an undef lane instead. |
| 1166 | setOperationAction(ISD::FMINIMUM, MVT::f16, Legal); |
| 1167 | setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal); |
| 1168 | setOperationAction(ISD::FMINIMUM, MVT::f32, Legal); |
| 1169 | setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal); |
| 1170 | setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal); |
| 1171 | setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal); |
| 1172 | setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal); |
| 1173 | setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal); |
| 1174 | |
| 1175 | if (Subtarget->hasFullFP16()) { |
| 1176 | setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal); |
| 1177 | setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal); |
| 1178 | setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal); |
| 1179 | setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal); |
| 1180 | |
| 1181 | setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal); |
| 1182 | setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal); |
| 1183 | setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal); |
| 1184 | setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal); |
| 1185 | } |
| 1186 | } |
| 1187 | |
| 1188 | // We have target-specific dag combine patterns for the following nodes: |
| 1189 | // ARMISD::VMOVRRD - No need to call setTargetDAGCombine |
| 1190 | setTargetDAGCombine(ISD::ADD); |
| 1191 | setTargetDAGCombine(ISD::SUB); |
| 1192 | setTargetDAGCombine(ISD::MUL); |
| 1193 | setTargetDAGCombine(ISD::AND); |
| 1194 | setTargetDAGCombine(ISD::OR); |
| 1195 | setTargetDAGCombine(ISD::XOR); |
| 1196 | |
| 1197 | if (Subtarget->hasV6Ops()) |
| 1198 | setTargetDAGCombine(ISD::SRL); |
| 1199 | if (Subtarget->isThumb1Only()) |
| 1200 | setTargetDAGCombine(ISD::SHL); |
| 1201 | |
| 1202 | setStackPointerRegisterToSaveRestore(ARM::SP); |
| 1203 | |
| 1204 | if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || |
| 1205 | !Subtarget->hasVFP2Base() || Subtarget->hasMinSize()) |
| 1206 | setSchedulingPreference(Sched::RegPressure); |
| 1207 | else |
| 1208 | setSchedulingPreference(Sched::Hybrid); |
| 1209 | |
// Temporary: rewrite interface to use type.
| 1211 | MaxStoresPerMemset = 8; |
| 1212 | MaxStoresPerMemsetOptSize = 4; |
| 1213 | MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores |
| 1214 | MaxStoresPerMemcpyOptSize = 2; |
| 1215 | MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores |
| 1216 | MaxStoresPerMemmoveOptSize = 2; |
| 1217 | |
| 1218 | // On ARM arguments smaller than 4 bytes are extended, so all arguments |
| 1219 | // are at least 4 bytes aligned. |
| 1220 | setMinStackArgumentAlignment(4); |
| 1221 | |
| 1222 | // Prefer likely predicted branches to selects on out-of-order cores. |
| 1223 | PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); |
| 1224 | |
| 1225 | setPrefLoopAlignment(Subtarget->getPrefLoopAlignment()); |
| 1226 | |
| 1227 | setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2); |
| 1228 | |
| 1229 | if (Subtarget->isThumb() || Subtarget->isThumb2()) |
| 1230 | setTargetDAGCombine(ISD::ABS); |
| 1231 | } |
| 1232 | |
| 1233 | bool ARMTargetLowering::useSoftFloat() const { |
| 1234 | return Subtarget->useSoftFloat(); |
| 1235 | } |
| 1236 | |
// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
| 1247 | std::pair<const TargetRegisterClass *, uint8_t> |
| 1248 | ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, |
| 1249 | MVT VT) const { |
| 1250 | const TargetRegisterClass *RRC = nullptr; |
| 1251 | uint8_t Cost = 1; |
| 1252 | switch (VT.SimpleTy) { |
| 1253 | default: |
| 1254 | return TargetLowering::findRepresentativeClass(TRI, VT); |
// Use DPR as the representative register class for all floating point
// and vector types. Since there are 32 SPR registers and 32 DPR registers,
// the cost is 1 for both f32 and f64.
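// For example, v2f64 below occupies one Q register (a pair of D registers),
// so it keeps DPR as its representative class but with a cost of 2.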
| 1258 | case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: |
| 1259 | case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: |
| 1260 | RRC = &ARM::DPRRegClass; |
| 1261 | // When NEON is used for SP, only half of the register file is available |
| 1262 | // because operations that define both SP and DP results will be constrained |
| 1263 | // to the VFP2 class (D0-D15). We currently model this constraint prior to |
| 1264 | // coalescing by double-counting the SP regs. See the FIXME above. |
| 1265 | if (Subtarget->useNEONForSinglePrecisionFP()) |
| 1266 | Cost = 2; |
| 1267 | break; |
| 1268 | case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: |
| 1269 | case MVT::v4f32: case MVT::v2f64: |
| 1270 | RRC = &ARM::DPRRegClass; |
| 1271 | Cost = 2; |
| 1272 | break; |
| 1273 | case MVT::v4i64: |
| 1274 | RRC = &ARM::DPRRegClass; |
| 1275 | Cost = 4; |
| 1276 | break; |
| 1277 | case MVT::v8i64: |
| 1278 | RRC = &ARM::DPRRegClass; |
| 1279 | Cost = 8; |
| 1280 | break; |
| 1281 | } |
| 1282 | return std::make_pair(RRC, Cost); |
| 1283 | } |
| 1284 | |
| 1285 | const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { |
| 1286 | switch ((ARMISD::NodeType)Opcode) { |
| 1287 | case ARMISD::FIRST_NUMBER: break; |
| 1288 | case ARMISD::Wrapper: return "ARMISD::Wrapper" ; |
| 1289 | case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC" ; |
| 1290 | case ARMISD::WrapperJT: return "ARMISD::WrapperJT" ; |
| 1291 | case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL" ; |
| 1292 | case ARMISD::CALL: return "ARMISD::CALL" ; |
| 1293 | case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED" ; |
| 1294 | case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK" ; |
| 1295 | case ARMISD::BRCOND: return "ARMISD::BRCOND" ; |
| 1296 | case ARMISD::BR_JT: return "ARMISD::BR_JT" ; |
| 1297 | case ARMISD::BR2_JT: return "ARMISD::BR2_JT" ; |
| 1298 | case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG" ; |
| 1299 | case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG" ; |
| 1300 | case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD" ; |
| 1301 | case ARMISD::CMP: return "ARMISD::CMP" ; |
| 1302 | case ARMISD::CMN: return "ARMISD::CMN" ; |
| 1303 | case ARMISD::CMPZ: return "ARMISD::CMPZ" ; |
| 1304 | case ARMISD::CMPFP: return "ARMISD::CMPFP" ; |
| 1305 | case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0" ; |
| 1306 | case ARMISD::BCC_i64: return "ARMISD::BCC_i64" ; |
| 1307 | case ARMISD::FMSTAT: return "ARMISD::FMSTAT" ; |
| 1308 | |
| 1309 | case ARMISD::CMOV: return "ARMISD::CMOV" ; |
| 1310 | case ARMISD::SUBS: return "ARMISD::SUBS" ; |
| 1311 | |
| 1312 | case ARMISD::SSAT: return "ARMISD::SSAT" ; |
| 1313 | case ARMISD::USAT: return "ARMISD::USAT" ; |
| 1314 | |
| 1315 | case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG" ; |
| 1316 | case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG" ; |
| 1317 | case ARMISD::RRX: return "ARMISD::RRX" ; |
| 1318 | |
| 1319 | case ARMISD::ADDC: return "ARMISD::ADDC" ; |
| 1320 | case ARMISD::ADDE: return "ARMISD::ADDE" ; |
| 1321 | case ARMISD::SUBC: return "ARMISD::SUBC" ; |
| 1322 | case ARMISD::SUBE: return "ARMISD::SUBE" ; |
| 1323 | |
| 1324 | case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD" ; |
| 1325 | case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR" ; |
| 1326 | case ARMISD::VMOVhr: return "ARMISD::VMOVhr" ; |
| 1327 | case ARMISD::VMOVrh: return "ARMISD::VMOVrh" ; |
| 1328 | case ARMISD::VMOVSR: return "ARMISD::VMOVSR" ; |
| 1329 | |
| 1330 | case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP" ; |
| 1331 | case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP" ; |
| 1332 | case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH" ; |
| 1333 | |
| 1334 | case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN" ; |
| 1335 | |
case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";
| 1337 | |
| 1338 | case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC" ; |
| 1339 | |
| 1340 | case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR" ; |
| 1341 | |
| 1342 | case ARMISD::PRELOAD: return "ARMISD::PRELOAD" ; |
| 1343 | |
| 1344 | case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK" ; |
| 1345 | case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK" ; |
| 1346 | |
| 1347 | case ARMISD::VCEQ: return "ARMISD::VCEQ" ; |
| 1348 | case ARMISD::VCEQZ: return "ARMISD::VCEQZ" ; |
| 1349 | case ARMISD::VCGE: return "ARMISD::VCGE" ; |
| 1350 | case ARMISD::VCGEZ: return "ARMISD::VCGEZ" ; |
| 1351 | case ARMISD::VCLEZ: return "ARMISD::VCLEZ" ; |
| 1352 | case ARMISD::VCGEU: return "ARMISD::VCGEU" ; |
| 1353 | case ARMISD::VCGT: return "ARMISD::VCGT" ; |
| 1354 | case ARMISD::VCGTZ: return "ARMISD::VCGTZ" ; |
| 1355 | case ARMISD::VCLTZ: return "ARMISD::VCLTZ" ; |
| 1356 | case ARMISD::VCGTU: return "ARMISD::VCGTU" ; |
| 1357 | case ARMISD::VTST: return "ARMISD::VTST" ; |
| 1358 | |
| 1359 | case ARMISD::VSHL: return "ARMISD::VSHL" ; |
| 1360 | case ARMISD::VSHRs: return "ARMISD::VSHRs" ; |
| 1361 | case ARMISD::VSHRu: return "ARMISD::VSHRu" ; |
| 1362 | case ARMISD::VRSHRs: return "ARMISD::VRSHRs" ; |
| 1363 | case ARMISD::VRSHRu: return "ARMISD::VRSHRu" ; |
| 1364 | case ARMISD::VRSHRN: return "ARMISD::VRSHRN" ; |
| 1365 | case ARMISD::VQSHLs: return "ARMISD::VQSHLs" ; |
| 1366 | case ARMISD::VQSHLu: return "ARMISD::VQSHLu" ; |
| 1367 | case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu" ; |
| 1368 | case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs" ; |
| 1369 | case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu" ; |
| 1370 | case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu" ; |
| 1371 | case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs" ; |
| 1372 | case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu" ; |
| 1373 | case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu" ; |
| 1374 | case ARMISD::VSLI: return "ARMISD::VSLI" ; |
| 1375 | case ARMISD::VSRI: return "ARMISD::VSRI" ; |
| 1376 | case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu" ; |
| 1377 | case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs" ; |
| 1378 | case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM" ; |
| 1379 | case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM" ; |
| 1380 | case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM" ; |
| 1381 | case ARMISD::VDUP: return "ARMISD::VDUP" ; |
| 1382 | case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE" ; |
| 1383 | case ARMISD::VEXT: return "ARMISD::VEXT" ; |
| 1384 | case ARMISD::VREV64: return "ARMISD::VREV64" ; |
| 1385 | case ARMISD::VREV32: return "ARMISD::VREV32" ; |
| 1386 | case ARMISD::VREV16: return "ARMISD::VREV16" ; |
| 1387 | case ARMISD::VZIP: return "ARMISD::VZIP" ; |
| 1388 | case ARMISD::VUZP: return "ARMISD::VUZP" ; |
| 1389 | case ARMISD::VTRN: return "ARMISD::VTRN" ; |
| 1390 | case ARMISD::VTBL1: return "ARMISD::VTBL1" ; |
| 1391 | case ARMISD::VTBL2: return "ARMISD::VTBL2" ; |
| 1392 | case ARMISD::VMULLs: return "ARMISD::VMULLs" ; |
| 1393 | case ARMISD::VMULLu: return "ARMISD::VMULLu" ; |
| 1394 | case ARMISD::UMAAL: return "ARMISD::UMAAL" ; |
| 1395 | case ARMISD::UMLAL: return "ARMISD::UMLAL" ; |
| 1396 | case ARMISD::SMLAL: return "ARMISD::SMLAL" ; |
| 1397 | case ARMISD::SMLALBB: return "ARMISD::SMLALBB" ; |
| 1398 | case ARMISD::SMLALBT: return "ARMISD::SMLALBT" ; |
| 1399 | case ARMISD::SMLALTB: return "ARMISD::SMLALTB" ; |
| 1400 | case ARMISD::SMLALTT: return "ARMISD::SMLALTT" ; |
| 1401 | case ARMISD::SMULWB: return "ARMISD::SMULWB" ; |
| 1402 | case ARMISD::SMULWT: return "ARMISD::SMULWT" ; |
| 1403 | case ARMISD::SMLALD: return "ARMISD::SMLALD" ; |
| 1404 | case ARMISD::SMLALDX: return "ARMISD::SMLALDX" ; |
| 1405 | case ARMISD::SMLSLD: return "ARMISD::SMLSLD" ; |
| 1406 | case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX" ; |
| 1407 | case ARMISD::SMMLAR: return "ARMISD::SMMLAR" ; |
| 1408 | case ARMISD::SMMLSR: return "ARMISD::SMMLSR" ; |
| 1409 | case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR" ; |
| 1410 | case ARMISD::BFI: return "ARMISD::BFI" ; |
| 1411 | case ARMISD::VORRIMM: return "ARMISD::VORRIMM" ; |
| 1412 | case ARMISD::VBICIMM: return "ARMISD::VBICIMM" ; |
| 1413 | case ARMISD::VBSL: return "ARMISD::VBSL" ; |
| 1414 | case ARMISD::MEMCPY: return "ARMISD::MEMCPY" ; |
| 1415 | case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP" ; |
| 1416 | case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP" ; |
| 1417 | case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP" ; |
| 1418 | case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP" ; |
| 1419 | case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD" ; |
| 1420 | case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD" ; |
| 1421 | case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD" ; |
| 1422 | case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD" ; |
| 1423 | case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD" ; |
| 1424 | case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD" ; |
| 1425 | case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD" ; |
| 1426 | case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD" ; |
| 1427 | case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD" ; |
| 1428 | case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD" ; |
| 1429 | case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD" ; |
| 1430 | case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD" ; |
| 1431 | case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD" ; |
| 1432 | case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD" ; |
| 1433 | case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD" ; |
| 1434 | case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD" ; |
| 1435 | case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD" ; |
| 1436 | case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD" ; |
| 1437 | } |
| 1438 | return nullptr; |
| 1439 | } |
| 1440 | |
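/// getSetCCResultType - Return the value type to use for setcc results. As a
/// concrete example, a setcc of v4f32 operands yields a v4i32 element mask,
/// while a scalar setcc yields an i32 (pointer-width) result on ARM.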
| 1441 | EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, |
| 1442 | EVT VT) const { |
| 1443 | if (!VT.isVector()) |
| 1444 | return getPointerTy(DL); |
| 1445 | return VT.changeVectorElementTypeToInteger(); |
| 1446 | } |
| 1447 | |
| 1448 | /// getRegClassFor - Return the register class that should be used for the |
| 1449 | /// specified value type. |
| 1450 | const TargetRegisterClass * |
| 1451 | ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { |
| 1452 | (void)isDivergent; |
| 1453 | // Map v4i64 to QQ registers but do not make the type legal. Similarly map |
| 1454 | // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to |
| 1455 | // load / store 4 to 8 consecutive D registers. |
| 1456 | if (Subtarget->hasNEON()) { |
| 1457 | if (VT == MVT::v4i64) |
| 1458 | return &ARM::QQPRRegClass; |
| 1459 | if (VT == MVT::v8i64) |
| 1460 | return &ARM::QQQQPRRegClass; |
| 1461 | } |
| 1462 | return TargetLowering::getRegClassFor(VT); |
| 1463 | } |
| 1464 | |
// memcpy, and other memory intrinsics, typically try to use LDM/STM if the
// source/dest is aligned and the copy size is large enough. We therefore want
// to align such objects passed to memory intrinsics.
| 1468 | bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, |
| 1469 | unsigned &PrefAlign) const { |
| 1470 | if (!isa<MemIntrinsic>(CI)) |
| 1471 | return false; |
| 1472 | MinSize = 8; |
| 1473 | // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 |
| 1474 | // cycle faster than 4-byte aligned LDM. |
| 1475 | PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4); |
| 1476 | return true; |
| 1477 | } |
| 1478 | |
| 1479 | // Create a fast isel object. |
| 1480 | FastISel * |
| 1481 | ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, |
| 1482 | const TargetLibraryInfo *libInfo) const { |
| 1483 | return ARM::createFastISel(funcInfo, libInfo); |
| 1484 | } |
| 1485 | |
| 1486 | Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { |
| 1487 | unsigned NumVals = N->getNumValues(); |
| 1488 | if (!NumVals) |
| 1489 | return Sched::RegPressure; |
| 1490 | |
| 1491 | for (unsigned i = 0; i != NumVals; ++i) { |
| 1492 | EVT VT = N->getValueType(i); |
| 1493 | if (VT == MVT::Glue || VT == MVT::Other) |
| 1494 | continue; |
| 1495 | if (VT.isFloatingPoint() || VT.isVector()) |
| 1496 | return Sched::ILP; |
| 1497 | } |
| 1498 | |
| 1499 | if (!N->isMachineOpcode()) |
| 1500 | return Sched::RegPressure; |
| 1501 | |
// Loads are scheduled for latency even if the instruction itinerary
// is not available.
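// For example, on a core whose itinerary reports a result latency of more
// than 2 cycles for a load's definition, scheduling for ILP hides that
// latency better than pure register-pressure scheduling would.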
| 1504 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 1505 | const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); |
| 1506 | |
| 1507 | if (MCID.getNumDefs() == 0) |
| 1508 | return Sched::RegPressure; |
| 1509 | if (!Itins->isEmpty() && |
| 1510 | Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2) |
| 1511 | return Sched::ILP; |
| 1512 | |
| 1513 | return Sched::RegPressure; |
| 1514 | } |
| 1515 | |
| 1516 | //===----------------------------------------------------------------------===// |
| 1517 | // Lowering Code |
| 1518 | //===----------------------------------------------------------------------===// |
| 1519 | |
| 1520 | static bool isSRL16(const SDValue &Op) { |
| 1521 | if (Op.getOpcode() != ISD::SRL) |
| 1522 | return false; |
| 1523 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) |
| 1524 | return Const->getZExtValue() == 16; |
| 1525 | return false; |
| 1526 | } |
| 1527 | |
| 1528 | static bool isSRA16(const SDValue &Op) { |
| 1529 | if (Op.getOpcode() != ISD::SRA) |
| 1530 | return false; |
| 1531 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) |
| 1532 | return Const->getZExtValue() == 16; |
| 1533 | return false; |
| 1534 | } |
| 1535 | |
| 1536 | static bool isSHL16(const SDValue &Op) { |
| 1537 | if (Op.getOpcode() != ISD::SHL) |
| 1538 | return false; |
| 1539 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) |
| 1540 | return Const->getZExtValue() == 16; |
| 1541 | return false; |
| 1542 | } |
| 1543 | |
// Check for a signed 16-bit value. We special-case SRA because it makes it
// simpler when also looking for SRAs that aren't sign-extending a smaller
// value. Without the check, we'd need to take extra care with checking order
// for some operations.
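// For example, (sra (shl X, 16), 16) is accepted via the isSRA16/isSHL16
// pair, and a plain sign extension from i16 to i32 leaves exactly 17 known
// sign bits, satisfying the ComputeNumSignBits test.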
| 1548 | static bool isS16(const SDValue &Op, SelectionDAG &DAG) { |
| 1549 | if (isSRA16(Op)) |
| 1550 | return isSHL16(Op.getOperand(0)); |
| 1551 | return DAG.ComputeNumSignBits(Op) == 17; |
| 1552 | } |
| 1553 | |
| 1554 | /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC |
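/// For example, (setult a, b) maps to ARMCC::LO, i.e. "cmp a, b" followed by
/// an instruction predicated on unsigned-lower.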
| 1555 | static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { |
| 1556 | switch (CC) { |
| 1557 | default: llvm_unreachable("Unknown condition code!" ); |
| 1558 | case ISD::SETNE: return ARMCC::NE; |
| 1559 | case ISD::SETEQ: return ARMCC::EQ; |
| 1560 | case ISD::SETGT: return ARMCC::GT; |
| 1561 | case ISD::SETGE: return ARMCC::GE; |
| 1562 | case ISD::SETLT: return ARMCC::LT; |
| 1563 | case ISD::SETLE: return ARMCC::LE; |
| 1564 | case ISD::SETUGT: return ARMCC::HI; |
| 1565 | case ISD::SETUGE: return ARMCC::HS; |
| 1566 | case ISD::SETULT: return ARMCC::LO; |
| 1567 | case ISD::SETULE: return ARMCC::LS; |
| 1568 | } |
| 1569 | } |
| 1570 | |
| 1571 | /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. |
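/// Some unordered comparisons need two ARM conditions: SETUEQ ("equal or
/// unordered"), for instance, comes back as EQ with CondCode2 = VS, so the
/// caller emits one operation predicated on EQ and a second predicated on VS.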
| 1572 | static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 1573 | ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN) { |
| 1574 | CondCode2 = ARMCC::AL; |
| 1575 | InvalidOnQNaN = true; |
| 1576 | switch (CC) { |
| 1577 | default: llvm_unreachable("Unknown FP condition!" ); |
| 1578 | case ISD::SETEQ: |
| 1579 | case ISD::SETOEQ: |
| 1580 | CondCode = ARMCC::EQ; |
| 1581 | InvalidOnQNaN = false; |
| 1582 | break; |
| 1583 | case ISD::SETGT: |
| 1584 | case ISD::SETOGT: CondCode = ARMCC::GT; break; |
| 1585 | case ISD::SETGE: |
| 1586 | case ISD::SETOGE: CondCode = ARMCC::GE; break; |
| 1587 | case ISD::SETOLT: CondCode = ARMCC::MI; break; |
| 1588 | case ISD::SETOLE: CondCode = ARMCC::LS; break; |
| 1589 | case ISD::SETONE: |
| 1590 | CondCode = ARMCC::MI; |
| 1591 | CondCode2 = ARMCC::GT; |
| 1592 | InvalidOnQNaN = false; |
| 1593 | break; |
| 1594 | case ISD::SETO: CondCode = ARMCC::VC; break; |
| 1595 | case ISD::SETUO: CondCode = ARMCC::VS; break; |
| 1596 | case ISD::SETUEQ: |
| 1597 | CondCode = ARMCC::EQ; |
| 1598 | CondCode2 = ARMCC::VS; |
| 1599 | InvalidOnQNaN = false; |
| 1600 | break; |
| 1601 | case ISD::SETUGT: CondCode = ARMCC::HI; break; |
| 1602 | case ISD::SETUGE: CondCode = ARMCC::PL; break; |
| 1603 | case ISD::SETLT: |
| 1604 | case ISD::SETULT: CondCode = ARMCC::LT; break; |
| 1605 | case ISD::SETLE: |
| 1606 | case ISD::SETULE: CondCode = ARMCC::LE; break; |
| 1607 | case ISD::SETNE: |
| 1608 | case ISD::SETUNE: |
| 1609 | CondCode = ARMCC::NE; |
| 1610 | InvalidOnQNaN = false; |
| 1611 | break; |
| 1612 | } |
| 1613 | } |
| 1614 | |
| 1615 | //===----------------------------------------------------------------------===// |
| 1616 | // Calling Convention Implementation |
| 1617 | //===----------------------------------------------------------------------===// |
| 1618 | |
| 1619 | /// getEffectiveCallingConv - Get the effective calling convention, taking into |
| 1620 | /// account presence of floating point hardware and calling convention |
| 1621 | /// limitations, such as support for variadic functions. |
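/// For example, a variadic callee declared with ARM_AAPCS_VFP is demoted to
/// plain ARM_AAPCS, since the VFP variant does not define how variadic
/// arguments are passed.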
| 1622 | CallingConv::ID |
| 1623 | ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, |
| 1624 | bool isVarArg) const { |
| 1625 | switch (CC) { |
| 1626 | default: |
| 1627 | report_fatal_error("Unsupported calling convention" ); |
| 1628 | case CallingConv::ARM_AAPCS: |
| 1629 | case CallingConv::ARM_APCS: |
| 1630 | case CallingConv::GHC: |
| 1631 | return CC; |
| 1632 | case CallingConv::PreserveMost: |
| 1633 | return CallingConv::PreserveMost; |
| 1634 | case CallingConv::ARM_AAPCS_VFP: |
| 1635 | case CallingConv::Swift: |
| 1636 | return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; |
| 1637 | case CallingConv::C: |
| 1638 | if (!Subtarget->isAAPCS_ABI()) |
| 1639 | return CallingConv::ARM_APCS; |
| 1640 | else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && |
| 1641 | getTargetMachine().Options.FloatABIType == FloatABI::Hard && |
| 1642 | !isVarArg) |
| 1643 | return CallingConv::ARM_AAPCS_VFP; |
| 1644 | else |
| 1645 | return CallingConv::ARM_AAPCS; |
| 1646 | case CallingConv::Fast: |
| 1647 | case CallingConv::CXX_FAST_TLS: |
| 1648 | if (!Subtarget->isAAPCS_ABI()) { |
| 1649 | if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) |
| 1650 | return CallingConv::Fast; |
| 1651 | return CallingConv::ARM_APCS; |
| 1652 | } else if (Subtarget->hasVFP2Base() && |
| 1653 | !Subtarget->isThumb1Only() && !isVarArg) |
| 1654 | return CallingConv::ARM_AAPCS_VFP; |
| 1655 | else |
| 1656 | return CallingConv::ARM_AAPCS; |
| 1657 | } |
| 1658 | } |
| 1659 | |
| 1660 | CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, |
| 1661 | bool isVarArg) const { |
| 1662 | return CCAssignFnForNode(CC, false, isVarArg); |
| 1663 | } |
| 1664 | |
| 1665 | CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, |
| 1666 | bool isVarArg) const { |
| 1667 | return CCAssignFnForNode(CC, true, isVarArg); |
| 1668 | } |
| 1669 | |
| 1670 | /// CCAssignFnForNode - Selects the correct CCAssignFn for the given |
| 1671 | /// CallingConvention. |
| 1672 | CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, |
| 1673 | bool Return, |
| 1674 | bool isVarArg) const { |
| 1675 | switch (getEffectiveCallingConv(CC, isVarArg)) { |
| 1676 | default: |
| 1677 | report_fatal_error("Unsupported calling convention" ); |
| 1678 | case CallingConv::ARM_APCS: |
| 1679 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); |
| 1680 | case CallingConv::ARM_AAPCS: |
| 1681 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 1682 | case CallingConv::ARM_AAPCS_VFP: |
| 1683 | return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); |
| 1684 | case CallingConv::Fast: |
| 1685 | return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); |
| 1686 | case CallingConv::GHC: |
| 1687 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); |
| 1688 | case CallingConv::PreserveMost: |
| 1689 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 1690 | } |
| 1691 | } |
| 1692 | |
| 1693 | /// LowerCallResult - Lower the result values of a call into the |
| 1694 | /// appropriate copies out of appropriate physical registers. |
| 1695 | SDValue ARMTargetLowering::LowerCallResult( |
| 1696 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, |
| 1697 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 1698 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, |
| 1699 | SDValue ThisVal) const { |
| 1700 | // Assign locations to each value returned by this call. |
| 1701 | SmallVector<CCValAssign, 16> RVLocs; |
| 1702 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 1703 | *DAG.getContext()); |
| 1704 | CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg)); |
| 1705 | |
| 1706 | // Copy all of the result registers out of their specified physreg. |
| 1707 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
| 1708 | CCValAssign VA = RVLocs[i]; |
| 1709 | |
| 1710 | // Pass 'this' value directly from the argument to return value, to avoid |
| 1711 | // reg unit interference |
| 1712 | if (i == 0 && isThisReturn) { |
| 1713 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && |
| 1714 | "unexpected return calling convention register assignment" ); |
| 1715 | InVals.push_back(ThisVal); |
| 1716 | continue; |
| 1717 | } |
| 1718 | |
| 1719 | SDValue Val; |
| 1720 | if (VA.needsCustom()) { |
| 1721 | // Handle f64 or half of a v2f64. |
| 1722 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
| 1723 | InFlag); |
| 1724 | Chain = Lo.getValue(1); |
| 1725 | InFlag = Lo.getValue(2); |
| 1726 | VA = RVLocs[++i]; // skip ahead to next loc |
| 1727 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
| 1728 | InFlag); |
| 1729 | Chain = Hi.getValue(1); |
| 1730 | InFlag = Hi.getValue(2); |
| 1731 | if (!Subtarget->isLittle()) |
| 1732 | std::swap (Lo, Hi); |
| 1733 | Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); |
| 1734 | |
| 1735 | if (VA.getLocVT() == MVT::v2f64) { |
| 1736 | SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); |
| 1737 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, |
| 1738 | DAG.getConstant(0, dl, MVT::i32)); |
| 1739 | |
| 1740 | VA = RVLocs[++i]; // skip ahead to next loc |
| 1741 | Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); |
| 1742 | Chain = Lo.getValue(1); |
| 1743 | InFlag = Lo.getValue(2); |
| 1744 | VA = RVLocs[++i]; // skip ahead to next loc |
| 1745 | Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); |
| 1746 | Chain = Hi.getValue(1); |
| 1747 | InFlag = Hi.getValue(2); |
| 1748 | if (!Subtarget->isLittle()) |
| 1749 | std::swap (Lo, Hi); |
| 1750 | Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); |
| 1751 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, |
| 1752 | DAG.getConstant(1, dl, MVT::i32)); |
| 1753 | } |
| 1754 | } else { |
| 1755 | Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), |
| 1756 | InFlag); |
| 1757 | Chain = Val.getValue(1); |
| 1758 | InFlag = Val.getValue(2); |
| 1759 | } |
| 1760 | |
| 1761 | switch (VA.getLocInfo()) { |
| 1762 | default: llvm_unreachable("Unknown loc info!" ); |
| 1763 | case CCValAssign::Full: break; |
| 1764 | case CCValAssign::BCvt: |
| 1765 | Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); |
| 1766 | break; |
| 1767 | } |
| 1768 | |
| 1769 | InVals.push_back(Val); |
| 1770 | } |
| 1771 | |
| 1772 | return Chain; |
| 1773 | } |
| 1774 | |
| 1775 | /// LowerMemOpCallTo - Store the argument to the stack. |
| 1776 | SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, |
| 1777 | SDValue Arg, const SDLoc &dl, |
| 1778 | SelectionDAG &DAG, |
| 1779 | const CCValAssign &VA, |
| 1780 | ISD::ArgFlagsTy Flags) const { |
| 1781 | unsigned LocMemOffset = VA.getLocMemOffset(); |
| 1782 | SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); |
| 1783 | PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), |
| 1784 | StackPtr, PtrOff); |
| 1785 | return DAG.getStore( |
| 1786 | Chain, dl, Arg, PtrOff, |
| 1787 | MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset)); |
| 1788 | } |
| 1789 | |
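/// PassF64ArgInRegs - Split an f64 (or half of a v2f64) into two i32 halves
/// with VMOVRRD and pass them along. For example, on little-endian targets an
/// f64 assigned to r0/r1 puts its low word in r0 and its high word in r1;
/// big-endian swaps the halves, which is what the "id" selection below
/// implements.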
| 1790 | void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, |
| 1791 | SDValue Chain, SDValue &Arg, |
| 1792 | RegsToPassVector &RegsToPass, |
| 1793 | CCValAssign &VA, CCValAssign &NextVA, |
| 1794 | SDValue &StackPtr, |
| 1795 | SmallVectorImpl<SDValue> &MemOpChains, |
| 1796 | ISD::ArgFlagsTy Flags) const { |
| 1797 | SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 1798 | DAG.getVTList(MVT::i32, MVT::i32), Arg); |
| 1799 | unsigned id = Subtarget->isLittle() ? 0 : 1; |
| 1800 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id))); |
| 1801 | |
| 1802 | if (NextVA.isRegLoc()) |
| 1803 | RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id))); |
| 1804 | else { |
| 1805 | assert(NextVA.isMemLoc()); |
| 1806 | if (!StackPtr.getNode()) |
| 1807 | StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, |
| 1808 | getPointerTy(DAG.getDataLayout())); |
| 1809 | |
| 1810 | MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id), |
| 1811 | dl, DAG, NextVA, |
| 1812 | Flags)); |
| 1813 | } |
| 1814 | } |
| 1815 | |
/// LowerCall - Lower a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
| 1819 | SDValue |
| 1820 | ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, |
| 1821 | SmallVectorImpl<SDValue> &InVals) const { |
| 1822 | SelectionDAG &DAG = CLI.DAG; |
| 1823 | SDLoc &dl = CLI.DL; |
| 1824 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
| 1825 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
| 1826 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
| 1827 | SDValue Chain = CLI.Chain; |
| 1828 | SDValue Callee = CLI.Callee; |
| 1829 | bool &isTailCall = CLI.IsTailCall; |
| 1830 | CallingConv::ID CallConv = CLI.CallConv; |
| 1831 | bool doesNotRet = CLI.DoesNotReturn; |
| 1832 | bool isVarArg = CLI.IsVarArg; |
| 1833 | |
| 1834 | MachineFunction &MF = DAG.getMachineFunction(); |
| 1835 | bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); |
| 1836 | bool isThisReturn = false; |
| 1837 | auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls" ); |
| 1838 | bool PreferIndirect = false; |
| 1839 | |
| 1840 | // Disable tail calls if they're not supported. |
| 1841 | if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true" ) |
| 1842 | isTailCall = false; |
| 1843 | |
| 1844 | if (isa<GlobalAddressSDNode>(Callee)) { |
// If we're optimizing for minimum size and the function is called three or
// more times in this block, we can improve code size by calling indirectly
// as BLXr has a 16-bit encoding.
| 1848 | auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); |
| 1849 | auto *BB = CLI.CS.getParent(); |
| 1850 | PreferIndirect = |
| 1851 | Subtarget->isThumb() && Subtarget->hasMinSize() && |
| 1852 | count_if(GV->users(), [&BB](const User *U) { |
| 1853 | return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB; |
| 1854 | }) > 2; |
| 1855 | } |
| 1856 | if (isTailCall) { |
| 1857 | // Check if it's really possible to do a tail call. |
| 1858 | isTailCall = IsEligibleForTailCallOptimization( |
| 1859 | Callee, CallConv, isVarArg, isStructRet, |
| 1860 | MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG, |
| 1861 | PreferIndirect); |
| 1862 | if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall()) |
| 1863 | report_fatal_error("failed to perform tail call elimination on a call " |
| 1864 | "site marked musttail" ); |
| 1865 | // We don't support GuaranteedTailCallOpt for ARM, only automatically |
| 1866 | // detected sibcalls. |
| 1867 | if (isTailCall) |
| 1868 | ++NumTailCalls; |
| 1869 | } |
| 1870 | |
| 1871 | // Analyze operands of the call, assigning locations to each operand. |
| 1872 | SmallVector<CCValAssign, 16> ArgLocs; |
| 1873 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 1874 | *DAG.getContext()); |
| 1875 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg)); |
| 1876 | |
| 1877 | // Get a count of how many bytes are to be pushed on the stack. |
| 1878 | unsigned NumBytes = CCInfo.getNextStackOffset(); |
| 1879 | |
| 1880 | if (isTailCall) { |
| 1881 | // For tail calls, memory operands are available in our caller's stack. |
| 1882 | NumBytes = 0; |
| 1883 | } else { |
| 1884 | // Adjust the stack pointer for the new arguments... |
| 1885 | // These operations are automatically eliminated by the prolog/epilog pass |
| 1886 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); |
| 1887 | } |
| 1888 | |
| 1889 | SDValue StackPtr = |
| 1890 | DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout())); |
| 1891 | |
| 1892 | RegsToPassVector RegsToPass; |
| 1893 | SmallVector<SDValue, 8> MemOpChains; |
| 1894 | |
| 1895 | // Walk the register/memloc assignments, inserting copies/loads. In the case |
| 1896 | // of tail call optimization, arguments are handled later. |
| 1897 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
| 1898 | i != e; |
| 1899 | ++i, ++realArgIdx) { |
| 1900 | CCValAssign &VA = ArgLocs[i]; |
| 1901 | SDValue Arg = OutVals[realArgIdx]; |
| 1902 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
| 1903 | bool isByVal = Flags.isByVal(); |
| 1904 | |
| 1905 | // Promote the value if needed. |
| 1906 | switch (VA.getLocInfo()) { |
| 1907 | default: llvm_unreachable("Unknown loc info!" ); |
| 1908 | case CCValAssign::Full: break; |
| 1909 | case CCValAssign::SExt: |
| 1910 | Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); |
| 1911 | break; |
| 1912 | case CCValAssign::ZExt: |
| 1913 | Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); |
| 1914 | break; |
| 1915 | case CCValAssign::AExt: |
| 1916 | Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); |
| 1917 | break; |
| 1918 | case CCValAssign::BCvt: |
| 1919 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); |
| 1920 | break; |
| 1921 | } |
| 1922 | |
| 1923 | // f64 and v2f64 might be passed in i32 pairs and must be split into pieces |
| 1924 | if (VA.needsCustom()) { |
| 1925 | if (VA.getLocVT() == MVT::v2f64) { |
| 1926 | SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
| 1927 | DAG.getConstant(0, dl, MVT::i32)); |
| 1928 | SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
| 1929 | DAG.getConstant(1, dl, MVT::i32)); |
| 1930 | |
| 1931 | PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, |
| 1932 | VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); |
| 1933 | |
| 1934 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 1935 | if (VA.isRegLoc()) { |
| 1936 | PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, |
| 1937 | VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); |
| 1938 | } else { |
| 1939 | assert(VA.isMemLoc()); |
| 1940 | |
| 1941 | MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, |
| 1942 | dl, DAG, VA, Flags)); |
| 1943 | } |
| 1944 | } else { |
| 1945 | PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], |
| 1946 | StackPtr, MemOpChains, Flags); |
| 1947 | } |
| 1948 | } else if (VA.isRegLoc()) { |
| 1949 | if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && |
| 1950 | Outs[0].VT == MVT::i32) { |
| 1951 | assert(VA.getLocVT() == MVT::i32 && |
| 1952 | "unexpected calling convention register assignment" ); |
| 1953 | assert(!Ins.empty() && Ins[0].VT == MVT::i32 && |
| 1954 | "unexpected use of 'returned'" ); |
| 1955 | isThisReturn = true; |
| 1956 | } |
| 1957 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); |
| 1958 | } else if (isByVal) { |
| 1959 | assert(VA.isMemLoc()); |
| 1960 | unsigned offset = 0; |
| 1961 | |
| 1962 | // True if this byval aggregate will be split between registers |
| 1963 | // and memory. |
| 1964 | unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); |
| 1965 | unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); |
| 1966 | |
| 1967 | if (CurByValIdx < ByValArgsCount) { |
| 1968 | |
| 1969 | unsigned RegBegin, RegEnd; |
| 1970 | CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd); |
| 1971 | |
| 1972 | EVT PtrVT = |
| 1973 | DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
| 1974 | unsigned int i, j; |
| 1975 | for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { |
| 1976 | SDValue Const = DAG.getConstant(4*i, dl, MVT::i32); |
| 1977 | SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); |
| 1978 | SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, |
| 1979 | MachinePointerInfo(), |
| 1980 | DAG.InferPtrAlignment(AddArg)); |
| 1981 | MemOpChains.push_back(Load.getValue(1)); |
| 1982 | RegsToPass.push_back(std::make_pair(j, Load)); |
| 1983 | } |
| 1984 | |
// If the parameter size exceeds the register area, the "offset" value
// helps us calculate the stack slot for the remaining part properly.
| 1987 | offset = RegEnd - RegBegin; |
| 1988 | |
| 1989 | CCInfo.nextInRegsParam(); |
| 1990 | } |
| 1991 | |
| 1992 | if (Flags.getByValSize() > 4*offset) { |
| 1993 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
| 1994 | unsigned LocMemOffset = VA.getLocMemOffset(); |
| 1995 | SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); |
| 1996 | SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff); |
| 1997 | SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl); |
| 1998 | SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset); |
| 1999 | SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl, |
| 2000 | MVT::i32); |
| 2001 | SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl, |
| 2002 | MVT::i32); |
| 2003 | |
| 2004 | SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); |
| 2005 | SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; |
| 2006 | MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs, |
| 2007 | Ops)); |
| 2008 | } |
| 2009 | } else if (!isTailCall) { |
| 2010 | assert(VA.isMemLoc()); |
| 2011 | |
| 2012 | MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, |
| 2013 | dl, DAG, VA, Flags)); |
| 2014 | } |
| 2015 | } |
| 2016 | |
| 2017 | if (!MemOpChains.empty()) |
| 2018 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); |
| 2019 | |
| 2020 | // Build a sequence of copy-to-reg nodes chained together with token chain |
| 2021 | // and flag operands which copy the outgoing args into the appropriate regs. |
| 2022 | SDValue InFlag; |
| 2023 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { |
| 2024 | Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, |
| 2025 | RegsToPass[i].second, InFlag); |
| 2026 | InFlag = Chain.getValue(1); |
| 2027 | } |
| 2028 | |
| 2029 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every |
| 2030 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol |
| 2031 | // node so that legalize doesn't hack it. |
| 2032 | bool isDirect = false; |
| 2033 | |
| 2034 | const TargetMachine &TM = getTargetMachine(); |
| 2035 | const Module *Mod = MF.getFunction().getParent(); |
| 2036 | const GlobalValue *GV = nullptr; |
| 2037 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) |
| 2038 | GV = G->getGlobal(); |
| 2039 | bool isStub = |
| 2040 | !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO(); |
| 2041 | |
| 2042 | bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); |
| 2043 | bool isLocalARMFunc = false; |
| 2044 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2045 | auto PtrVt = getPointerTy(DAG.getDataLayout()); |
| 2046 | |
| 2047 | if (Subtarget->genLongCalls()) { |
| 2048 | assert((!isPositionIndependent() || Subtarget->isTargetWindows()) && |
| 2049 | "long-calls codegen is not position independent!" ); |
| 2050 | // Handle a global address or an external symbol. If it's not one of |
| 2051 | // those, the target's already in a register, so we don't need to do |
| 2052 | // anything extra. |
| 2053 | if (isa<GlobalAddressSDNode>(Callee)) { |
| 2054 | // Create a constant pool entry for the callee address |
| 2055 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2056 | ARMConstantPoolValue *CPV = |
| 2057 | ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); |
| 2058 | |
| 2059 | // Get the address of the callee into a register |
| 2060 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); |
| 2061 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 2062 | Callee = DAG.getLoad( |
| 2063 | PtrVt, dl, DAG.getEntryNode(), CPAddr, |
| 2064 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 2065 | } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { |
| 2066 | const char *Sym = S->getSymbol(); |
| 2067 | |
| 2068 | // Create a constant pool entry for the callee address |
| 2069 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2070 | ARMConstantPoolValue *CPV = |
| 2071 | ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, |
| 2072 | ARMPCLabelIndex, 0); |
| 2073 | // Get the address of the callee into a register |
| 2074 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); |
| 2075 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 2076 | Callee = DAG.getLoad( |
| 2077 | PtrVt, dl, DAG.getEntryNode(), CPAddr, |
| 2078 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 2079 | } |
| 2080 | } else if (isa<GlobalAddressSDNode>(Callee)) { |
| 2081 | if (!PreferIndirect) { |
| 2082 | isDirect = true; |
| 2083 | bool isDef = GV->isStrongDefinitionForLinker(); |
| 2084 | |
| 2085 | // ARM call to a local ARM function is predicable. |
| 2086 | isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); |
| 2087 | // tBX takes a register source operand. |
| 2088 | if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
| 2089 | assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?" ); |
| 2090 | Callee = DAG.getNode( |
| 2091 | ARMISD::WrapperPIC, dl, PtrVt, |
| 2092 | DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY)); |
| 2093 | Callee = DAG.getLoad( |
| 2094 | PtrVt, dl, DAG.getEntryNode(), Callee, |
| 2095 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), |
| 2096 | /* Alignment = */ 0, MachineMemOperand::MODereferenceable | |
| 2097 | MachineMemOperand::MOInvariant); |
| 2098 | } else if (Subtarget->isTargetCOFF()) { |
| 2099 | assert(Subtarget->isTargetWindows() && |
| 2100 | "Windows is the only supported COFF target" ); |
| 2101 | unsigned TargetFlags = GV->hasDLLImportStorageClass() |
| 2102 | ? ARMII::MO_DLLIMPORT |
| 2103 | : ARMII::MO_NO_FLAG; |
| 2104 | Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0, |
| 2105 | TargetFlags); |
| 2106 | if (GV->hasDLLImportStorageClass()) |
| 2107 | Callee = |
| 2108 | DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), |
| 2109 | DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee), |
| 2110 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
| 2111 | } else { |
| 2112 | Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0); |
| 2113 | } |
| 2114 | } |
| 2115 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { |
| 2116 | isDirect = true; |
| 2117 | // tBX takes a register source operand. |
| 2118 | const char *Sym = S->getSymbol(); |
| 2119 | if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
| 2120 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2121 | ARMConstantPoolValue *CPV = |
| 2122 | ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, |
| 2123 | ARMPCLabelIndex, 4); |
| 2124 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); |
| 2125 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 2126 | Callee = DAG.getLoad( |
| 2127 | PtrVt, dl, DAG.getEntryNode(), CPAddr, |
| 2128 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 2129 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
| 2130 | Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel); |
| 2131 | } else { |
Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
| 2133 | } |
| 2134 | } |
| 2135 | |
| 2136 | // FIXME: handle tail calls differently. |
| 2137 | unsigned CallOpc; |
| 2138 | if (Subtarget->isThumb()) { |
| 2139 | if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) |
| 2140 | CallOpc = ARMISD::CALL_NOLINK; |
| 2141 | else |
| 2142 | CallOpc = ARMISD::CALL; |
| 2143 | } else { |
| 2144 | if (!isDirect && !Subtarget->hasV5TOps()) |
| 2145 | CallOpc = ARMISD::CALL_NOLINK; |
| 2146 | else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && |
| 2147 | // Emit regular call when code size is the priority |
| 2148 | !Subtarget->hasMinSize()) |
| 2149 | // "mov lr, pc; b _foo" to avoid confusing the RSP |
| 2150 | CallOpc = ARMISD::CALL_NOLINK; |
| 2151 | else |
| 2152 | CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; |
| 2153 | } |
| 2154 | |
| 2155 | std::vector<SDValue> Ops; |
| 2156 | Ops.push_back(Chain); |
| 2157 | Ops.push_back(Callee); |
| 2158 | |
| 2159 | // Add argument registers to the end of the list so that they are known live |
| 2160 | // into the call. |
| 2161 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) |
| 2162 | Ops.push_back(DAG.getRegister(RegsToPass[i].first, |
| 2163 | RegsToPass[i].second.getValueType())); |
| 2164 | |
| 2165 | // Add a register mask operand representing the call-preserved registers. |
| 2166 | if (!isTailCall) { |
| 2167 | const uint32_t *Mask; |
| 2168 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
| 2169 | if (isThisReturn) { |
| 2170 | // For 'this' returns, use the R0-preserving mask if applicable |
| 2171 | Mask = ARI->getThisReturnPreservedMask(MF, CallConv); |
| 2172 | if (!Mask) { |
| 2173 | // Set isThisReturn to false if the calling convention is not one that |
| 2174 | // allows 'returned' to be modeled in this way, so LowerCallResult does |
| 2175 | // not try to pass 'this' straight through |
| 2176 | isThisReturn = false; |
| 2177 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2178 | } |
| 2179 | } else |
| 2180 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2181 | |
| 2182 | assert(Mask && "Missing call preserved mask for calling convention" ); |
| 2183 | Ops.push_back(DAG.getRegisterMask(Mask)); |
| 2184 | } |
| 2185 | |
| 2186 | if (InFlag.getNode()) |
| 2187 | Ops.push_back(InFlag); |
| 2188 | |
| 2189 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
| 2190 | if (isTailCall) { |
| 2191 | MF.getFrameInfo().setHasTailCall(); |
| 2192 | return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops); |
| 2193 | } |
| 2194 | |
| 2195 | // Returns a chain and a flag for retval copy to use. |
| 2196 | Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); |
| 2197 | InFlag = Chain.getValue(1); |
| 2198 | |
| 2199 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), |
| 2200 | DAG.getIntPtrConstant(0, dl, true), InFlag, dl); |
| 2201 | if (!Ins.empty()) |
| 2202 | InFlag = Chain.getValue(1); |
| 2203 | |
| 2204 | // Handle result values, copying them out of physregs into vregs that we |
| 2205 | // return. |
| 2206 | return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG, |
| 2207 | InVals, isThisReturn, |
| 2208 | isThisReturn ? OutVals[0] : SDValue()); |
| 2209 | } |
| 2210 | |
/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack. Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
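/// As a worked example (a sketch of the arithmetic below): a 16-byte byval
/// argument arriving when r1 is the next free register covers r1-r3
/// (Excess = 12 bytes) and, assuming NSAA == SP, leaves Size = 16 - 12 = 4
/// bytes to be passed on the stack.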
| 2215 | void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size, |
| 2216 | unsigned Align) const { |
| 2217 | // Byval (as with any stack) slots are always at least 4 byte aligned. |
| 2218 | Align = std::max(Align, 4U); |
| 2219 | |
| 2220 | unsigned Reg = State->AllocateReg(GPRArgRegs); |
| 2221 | if (!Reg) |
| 2222 | return; |
| 2223 | |
| 2224 | unsigned AlignInRegs = Align / 4; |
| 2225 | unsigned Waste = (ARM::R4 - Reg) % AlignInRegs; |
| 2226 | for (unsigned i = 0; i < Waste; ++i) |
| 2227 | Reg = State->AllocateReg(GPRArgRegs); |
| 2228 | |
| 2229 | if (!Reg) |
| 2230 | return; |
| 2231 | |
| 2232 | unsigned Excess = 4 * (ARM::R4 - Reg); |
| 2233 | |
// Special case when NSAA != SP and the parameter size is greater than the
// size of all remaining GPR regs. In that case we can't split the parameter,
// we must send it entirely to the stack. We also must set NCRN to R4, so we
// waste all remaining registers.
| 2238 | const unsigned NSAAOffset = State->getNextStackOffset(); |
| 2239 | if (NSAAOffset != 0 && Size > Excess) { |
| 2240 | while (State->AllocateReg(GPRArgRegs)) |
| 2241 | ; |
| 2242 | return; |
| 2243 | } |
| 2244 | |
// The first register for the byval parameter is the first register that
// wasn't allocated before this method call, i.e. "Reg".
// If the parameter is small enough to fit in the range [Reg, r4), then the
// end (first-after-last) register is Reg + param-size-in-regs; otherwise the
// parameter is split between registers and stack, and the end register is r4.
| 2251 | unsigned ByValRegBegin = Reg; |
| 2252 | unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4); |
| 2253 | State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd); |
// Note that the first register was already allocated at the beginning of
// this function; allocate the remaining registers we need.
| 2256 | for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) |
| 2257 | State->AllocateReg(GPRArgRegs); |
| 2258 | // A byval parameter that is split between registers and memory needs its |
| 2259 | // size truncated here. |
| 2260 | // In the case where the entire structure fits in registers, we set the |
| 2261 | // size in memory to zero. |
| 2262 | Size = std::max<int>(Size - Excess, 0); |
| 2263 | } |
| 2264 | |
| 2265 | /// MatchingStackOffset - Return true if the given stack call argument is |
| 2266 | /// already available in the same position (relatively) of the caller's |
| 2267 | /// incoming argument stack. |
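/// For example, an outgoing argument that is merely reloaded from the
/// caller's own fixed stack slot, at the same offset and with the same size,
/// is already in place and needs no store before the tail call.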
| 2268 | static |
| 2269 | bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, |
| 2270 | MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, |
| 2271 | const TargetInstrInfo *TII) { |
| 2272 | unsigned Bytes = Arg.getValueSizeInBits() / 8; |
| 2273 | int FI = std::numeric_limits<int>::max(); |
| 2274 | if (Arg.getOpcode() == ISD::CopyFromReg) { |
| 2275 | unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); |
| 2276 | if (!TargetRegisterInfo::isVirtualRegister(VR)) |
| 2277 | return false; |
| 2278 | MachineInstr *Def = MRI->getVRegDef(VR); |
| 2279 | if (!Def) |
| 2280 | return false; |
| 2281 | if (!Flags.isByVal()) { |
| 2282 | if (!TII->isLoadFromStackSlot(*Def, FI)) |
| 2283 | return false; |
| 2284 | } else { |
| 2285 | return false; |
| 2286 | } |
| 2287 | } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { |
| 2288 | if (Flags.isByVal()) |
| 2289 | // ByVal argument is passed in as a pointer but it's now being |
| 2290 | // dereferenced. e.g. |
| 2291 | // define @foo(%struct.X* %A) { |
| 2292 | // tail call @bar(%struct.X* byval %A) |
| 2293 | // } |
| 2294 | return false; |
| 2295 | SDValue Ptr = Ld->getBasePtr(); |
| 2296 | FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); |
| 2297 | if (!FINode) |
| 2298 | return false; |
| 2299 | FI = FINode->getIndex(); |
| 2300 | } else |
| 2301 | return false; |
| 2302 | |
| 2303 | assert(FI != std::numeric_limits<int>::max()); |
| 2304 | if (!MFI.isFixedObjectIndex(FI)) |
| 2305 | return false; |
| 2306 | return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI); |
| 2307 | } |
| 2308 | |
| 2309 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
| 2310 | /// for tail call optimization. Targets which want to do tail call |
| 2311 | /// optimization should implement this function. |
| 2312 | bool ARMTargetLowering::IsEligibleForTailCallOptimization( |
| 2313 | SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, |
| 2314 | bool isCalleeStructRet, bool isCallerStructRet, |
| 2315 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2316 | const SmallVectorImpl<SDValue> &OutVals, |
| 2317 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG, |
| 2318 | const bool isIndirect) const { |
| 2319 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2320 | const Function &CallerF = MF.getFunction(); |
| 2321 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
| 2322 | |
| 2323 | assert(Subtarget->supportsTailCall()); |
| 2324 | |
| 2325 | // Indirect tail calls cannot be optimized for Thumb1 if the args |
| 2326 | // to the call take up r0-r3. The reason is that there are no legal registers |
| 2327 | // left to hold the pointer to the function to be called. |
| 2328 | if (Subtarget->isThumb1Only() && Outs.size() >= 4 && |
| 2329 | (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect)) |
| 2330 | return false; |
| 2331 | |
| 2332 | // Look for obvious safe cases to perform tail call optimization that do not |
| 2333 | // require ABI changes. This is what gcc calls sibcall. |
| 2334 | |
| 2335 | // Exception-handling functions need a special set of instructions to indicate |
| 2336 | // a return to the hardware. Tail-calling another function would probably |
| 2337 | // break this. |
if (CallerF.hasFnAttribute("interrupt"))
| 2339 | return false; |
| 2340 | |
| 2341 | // Also avoid sibcall optimization if either caller or callee uses struct |
| 2342 | // return semantics. |
| 2343 | if (isCalleeStructRet || isCallerStructRet) |
| 2344 | return false; |
| 2345 | |
| 2346 | // Externally-defined functions with weak linkage should not be |
| 2347 | // tail-called on ARM when the OS does not support dynamic |
| 2348 | // pre-emption of symbols, as the AAELF spec requires normal calls |
| 2349 | // to undefined weak functions to be replaced with a NOP or jump to the |
| 2350 | // next instruction. The behaviour of branch instructions in this |
| 2351 | // situation (as used for tail calls) is implementation-defined, so we |
| 2352 | // cannot rely on the linker replacing the tail call with a return. |
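// For example, given a sketch along the lines of
//   __attribute__((weak)) extern void maybe_there(void);
//   void f(void) { maybe_there(); }
// the linker may rewrite a "bl maybe_there" to a NOP if the symbol remains
// undefined, but no such rewrite is guaranteed for a tail-call branch.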
| 2353 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { |
| 2354 | const GlobalValue *GV = G->getGlobal(); |
| 2355 | const Triple &TT = getTargetMachine().getTargetTriple(); |
| 2356 | if (GV->hasExternalWeakLinkage() && |
| 2357 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) |
| 2358 | return false; |
| 2359 | } |
| 2360 | |
| 2361 | // Check that the call results are passed in the same way. |
| 2362 | LLVMContext &C = *DAG.getContext(); |
| 2363 | if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, |
| 2364 | CCAssignFnForReturn(CalleeCC, isVarArg), |
| 2365 | CCAssignFnForReturn(CallerCC, isVarArg))) |
| 2366 | return false; |
| 2367 | // The callee has to preserve all registers the caller needs to preserve. |
| 2368 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 2369 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
| 2370 | if (CalleeCC != CallerCC) { |
| 2371 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
| 2372 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) |
| 2373 | return false; |
| 2374 | } |
| 2375 | |
| 2376 | // If Caller's vararg or byval argument has been split between registers and |
| 2377 | // stack, do not perform tail call, since part of the argument is in caller's |
| 2378 | // local frame. |
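// For example, if the caller itself received a 12-byte byval argument when
// only r2-r3 were free, the 8 register-passed bytes were stored into the
// caller's local frame on entry to make the struct contiguous; a tail call
// would deallocate that frame while the data is still live.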
| 2379 | const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); |
| 2380 | if (AFI_Caller->getArgRegsSaveSize()) |
| 2381 | return false; |
| 2382 | |
| 2383 | // If the callee takes no arguments then go on to check the results of the |
| 2384 | // call. |
| 2385 | if (!Outs.empty()) { |
| 2386 | // Check if stack adjustment is needed. For now, do not do this if any |
| 2387 | // argument is passed on the stack. |
| 2388 | SmallVector<CCValAssign, 16> ArgLocs; |
| 2389 | CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); |
| 2390 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg)); |
| 2391 | if (CCInfo.getNextStackOffset()) { |
| 2392 | // Check if the arguments are already laid out in the right way as |
| 2393 | // the caller's fixed stack objects. |
| 2394 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 2395 | const MachineRegisterInfo *MRI = &MF.getRegInfo(); |
| 2396 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 2397 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
| 2398 | i != e; |
| 2399 | ++i, ++realArgIdx) { |
| 2400 | CCValAssign &VA = ArgLocs[i]; |
| 2401 | EVT RegVT = VA.getLocVT(); |
| 2402 | SDValue Arg = OutVals[realArgIdx]; |
| 2403 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
| 2404 | if (VA.getLocInfo() == CCValAssign::Indirect) |
| 2405 | return false; |
| 2406 | if (VA.needsCustom()) { |
| 2407 | // f64 and vector types are split into multiple registers or |
| 2408 | // register/stack-slot combinations. The types will not match |
| 2409 | // the registers; give up on memory f64 refs until we figure |
| 2410 | // out what to do about this. |
| 2411 | if (!VA.isRegLoc()) |
| 2412 | return false; |
| 2413 | if (!ArgLocs[++i].isRegLoc()) |
| 2414 | return false; |
| 2415 | if (RegVT == MVT::v2f64) { |
| 2416 | if (!ArgLocs[++i].isRegLoc()) |
| 2417 | return false; |
| 2418 | if (!ArgLocs[++i].isRegLoc()) |
| 2419 | return false; |
| 2420 | } |
| 2421 | } else if (!VA.isRegLoc()) { |
| 2422 | if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, |
| 2423 | MFI, MRI, TII)) |
| 2424 | return false; |
| 2425 | } |
| 2426 | } |
| 2427 | } |
| 2428 | |
| 2429 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 2430 | if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) |
| 2431 | return false; |
| 2432 | } |
| 2433 | |
| 2434 | return true; |
| 2435 | } |
| 2436 | |
| 2437 | bool |
| 2438 | ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, |
| 2439 | MachineFunction &MF, bool isVarArg, |
| 2440 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2441 | LLVMContext &Context) const { |
| 2442 | SmallVector<CCValAssign, 16> RVLocs; |
| 2443 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); |
| 2444 | return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); |
| 2445 | } |
| 2446 | |
| 2447 | static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, |
| 2448 | const SDLoc &DL, SelectionDAG &DAG) { |
| 2449 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 2450 | const Function &F = MF.getFunction(); |
| 2451 | |
StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();
| 2453 | |
| 2454 | // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset |
| 2455 | // version of the "preferred return address". These offsets affect the return |
| 2456 | // instruction if this is a return from PL1 without hypervisor extensions. |
| 2457 | // IRQ/FIQ: +4 "subs pc, lr, #4" |
| 2458 | // SWI: 0 "subs pc, lr, #0" |
| 2459 | // ABORT: +4 "subs pc, lr, #4" |
| 2460 | // UNDEF: +4/+2 "subs pc, lr, #0" |
// UNDEF varies depending on whether the exception came from ARM or Thumb
// mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
| 2463 | |
| 2464 | int64_t LROffset; |
| 2465 | if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || |
IntKind == "ABORT")
| 2467 | LROffset = 4; |
else if (IntKind == "SWI" || IntKind == "UNDEF")
| 2469 | LROffset = 0; |
| 2470 | else |
| 2471 | report_fatal_error("Unsupported interrupt attribute. If present, value " |
| 2472 | "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF" ); |
| 2473 | |
| 2474 | RetOps.insert(RetOps.begin() + 1, |
| 2475 | DAG.getConstant(LROffset, DL, MVT::i32, false)); |
| 2476 | |
| 2477 | return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps); |
| 2478 | } |
| 2479 | |
| 2480 | SDValue |
| 2481 | ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
| 2482 | bool isVarArg, |
| 2483 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2484 | const SmallVectorImpl<SDValue> &OutVals, |
| 2485 | const SDLoc &dl, SelectionDAG &DAG) const { |
// CCValAssign - represents the assignment of the return value to a location.
| 2487 | SmallVector<CCValAssign, 16> RVLocs; |
| 2488 | |
| 2489 | // CCState - Info about the registers and stack slots. |
| 2490 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 2491 | *DAG.getContext()); |
| 2492 | |
| 2493 | // Analyze outgoing return values. |
| 2494 | CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); |
| 2495 | |
| 2496 | SDValue Flag; |
| 2497 | SmallVector<SDValue, 4> RetOps; |
| 2498 | RetOps.push_back(Chain); // Operand #0 = Chain (updated below) |
| 2499 | bool isLittleEndian = Subtarget->isLittle(); |
| 2500 | |
| 2501 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2502 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2503 | AFI->setReturnRegsCount(RVLocs.size()); |
| 2504 | |
| 2505 | // Copy the result values into the output registers. |
| 2506 | for (unsigned i = 0, realRVLocIdx = 0; |
| 2507 | i != RVLocs.size(); |
| 2508 | ++i, ++realRVLocIdx) { |
| 2509 | CCValAssign &VA = RVLocs[i]; |
assert(VA.isRegLoc() && "Can only return in registers!");
| 2511 | |
| 2512 | SDValue Arg = OutVals[realRVLocIdx]; |
| 2513 | bool ReturnF16 = false; |
| 2514 | |
| 2515 | if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) { |
| 2516 | // Half-precision return values can be returned like this: |
| 2517 | // |
// t11: f16 = fadd ...
| 2519 | // t12: i16 = bitcast t11 |
| 2520 | // t13: i32 = zero_extend t12 |
| 2521 | // t14: f32 = bitcast t13 <~~~~~~~ Arg |
| 2522 | // |
| 2523 | // to avoid code generation for bitcasts, we simply set Arg to the node |
| 2524 | // that produces the f16 value, t11 in this case. |
| 2525 | // |
| 2526 | if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) { |
| 2527 | SDValue ZE = Arg.getOperand(0); |
| 2528 | if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) { |
| 2529 | SDValue BC = ZE.getOperand(0); |
| 2530 | if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) { |
| 2531 | Arg = BC.getOperand(0); |
| 2532 | ReturnF16 = true; |
| 2533 | } |
| 2534 | } |
| 2535 | } |
| 2536 | } |
| 2537 | |
| 2538 | switch (VA.getLocInfo()) { |
| 2539 | default: llvm_unreachable("Unknown loc info!" ); |
| 2540 | case CCValAssign::Full: break; |
| 2541 | case CCValAssign::BCvt: |
| 2542 | if (!ReturnF16) |
| 2543 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); |
| 2544 | break; |
| 2545 | } |
| 2546 | |
| 2547 | if (VA.needsCustom()) { |
| 2548 | if (VA.getLocVT() == MVT::v2f64) { |
| 2549 | // Extract the first half and return it in two registers. |
| 2550 | SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
| 2551 | DAG.getConstant(0, dl, MVT::i32)); |
| 2552 | SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 2553 | DAG.getVTList(MVT::i32, MVT::i32), Half); |
| 2554 | |
| 2555 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
| 2556 | HalfGPRs.getValue(isLittleEndian ? 0 : 1), |
| 2557 | Flag); |
| 2558 | Flag = Chain.getValue(1); |
| 2559 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
| 2560 | VA = RVLocs[++i]; // skip ahead to next loc |
| 2561 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
| 2562 | HalfGPRs.getValue(isLittleEndian ? 1 : 0), |
| 2563 | Flag); |
| 2564 | Flag = Chain.getValue(1); |
| 2565 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
| 2566 | VA = RVLocs[++i]; // skip ahead to next loc |
| 2567 | |
| 2568 | // Extract the 2nd half and fall through to handle it as an f64 value. |
| 2569 | Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
| 2570 | DAG.getConstant(1, dl, MVT::i32)); |
| 2571 | } |
| 2572 | // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is |
| 2573 | // available. |
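// For example, returning a plain f64 this way roughly emits
//   vmov r0, r1, d0   @ fmrrd: low word to r0 when little-endian
// ahead of the function's return sequence.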
| 2574 | SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 2575 | DAG.getVTList(MVT::i32, MVT::i32), Arg); |
| 2576 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
| 2577 | fmrrd.getValue(isLittleEndian ? 0 : 1), |
| 2578 | Flag); |
| 2579 | Flag = Chain.getValue(1); |
| 2580 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
| 2581 | VA = RVLocs[++i]; // skip ahead to next loc |
| 2582 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
| 2583 | fmrrd.getValue(isLittleEndian ? 1 : 0), |
| 2584 | Flag); |
| 2585 | } else |
| 2586 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); |
| 2587 | |
// Guarantee that all emitted copies are glued together, so that nothing
// else can be scheduled in between them.
| 2590 | Flag = Chain.getValue(1); |
| 2591 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), |
| 2592 | ReturnF16 ? MVT::f16 : VA.getLocVT())); |
| 2593 | } |
| 2594 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 2595 | const MCPhysReg *I = |
| 2596 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); |
| 2597 | if (I) { |
| 2598 | for (; *I; ++I) { |
| 2599 | if (ARM::GPRRegClass.contains(*I)) |
| 2600 | RetOps.push_back(DAG.getRegister(*I, MVT::i32)); |
| 2601 | else if (ARM::DPRRegClass.contains(*I)) |
| 2602 | RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); |
| 2603 | else |
| 2604 | llvm_unreachable("Unexpected register class in CSRsViaCopy!" ); |
| 2605 | } |
| 2606 | } |
| 2607 | |
| 2608 | // Update chain and glue. |
| 2609 | RetOps[0] = Chain; |
| 2610 | if (Flag.getNode()) |
| 2611 | RetOps.push_back(Flag); |
| 2612 | |
| 2613 | // CPUs which aren't M-class use a special sequence to return from |
| 2614 | // exceptions (roughly, any instruction setting pc and cpsr simultaneously, |
| 2615 | // though we use "subs pc, lr, #N"). |
| 2616 | // |
| 2617 | // M-class CPUs actually use a normal return sequence with a special |
| 2618 | // (hardware-provided) value in LR, so the normal code path works. |
if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
!Subtarget->isMClass()) {
if (Subtarget->isThumb1Only())
report_fatal_error("interrupt attribute is not supported in Thumb1");
| 2623 | return LowerInterruptReturn(RetOps, dl, DAG); |
| 2624 | } |
| 2625 | |
| 2626 | return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps); |
| 2627 | } |
| 2628 | |
| 2629 | bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { |
| 2630 | if (N->getNumValues() != 1) |
| 2631 | return false; |
| 2632 | if (!N->hasNUsesOfValue(1, 0)) |
| 2633 | return false; |
| 2634 | |
| 2635 | SDValue TCChain = Chain; |
| 2636 | SDNode *Copy = *N->use_begin(); |
| 2637 | if (Copy->getOpcode() == ISD::CopyToReg) { |
| 2638 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 2639 | // perform a tail call. |
| 2640 | if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 2641 | return false; |
| 2642 | TCChain = Copy->getOperand(0); |
| 2643 | } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { |
| 2644 | SDNode *VMov = Copy; |
| 2645 | // f64 returned in a pair of GPRs. |
| 2646 | SmallPtrSet<SDNode*, 2> Copies; |
| 2647 | for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); |
| 2648 | UI != UE; ++UI) { |
| 2649 | if (UI->getOpcode() != ISD::CopyToReg) |
| 2650 | return false; |
| 2651 | Copies.insert(*UI); |
| 2652 | } |
| 2653 | if (Copies.size() > 2) |
| 2654 | return false; |
| 2655 | |
| 2656 | for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); |
| 2657 | UI != UE; ++UI) { |
| 2658 | SDValue UseChain = UI->getOperand(0); |
| 2659 | if (Copies.count(UseChain.getNode())) |
| 2660 | // Second CopyToReg |
| 2661 | Copy = *UI; |
| 2662 | else { |
| 2663 | // We are at the top of this chain. |
| 2664 | // If the copy has a glue operand, we conservatively assume it |
| 2665 | // isn't safe to perform a tail call. |
| 2666 | if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue) |
| 2667 | return false; |
| 2668 | // First CopyToReg |
| 2669 | TCChain = UseChain; |
| 2670 | } |
| 2671 | } |
| 2672 | } else if (Copy->getOpcode() == ISD::BITCAST) { |
| 2673 | // f32 returned in a single GPR. |
| 2674 | if (!Copy->hasOneUse()) |
| 2675 | return false; |
| 2676 | Copy = *Copy->use_begin(); |
| 2677 | if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) |
| 2678 | return false; |
| 2679 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 2680 | // perform a tail call. |
| 2681 | if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 2682 | return false; |
| 2683 | TCChain = Copy->getOperand(0); |
| 2684 | } else { |
| 2685 | return false; |
| 2686 | } |
| 2687 | |
| 2688 | bool HasRet = false; |
| 2689 | for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); |
| 2690 | UI != UE; ++UI) { |
| 2691 | if (UI->getOpcode() != ARMISD::RET_FLAG && |
| 2692 | UI->getOpcode() != ARMISD::INTRET_FLAG) |
| 2693 | return false; |
| 2694 | HasRet = true; |
| 2695 | } |
| 2696 | |
| 2697 | if (!HasRet) |
| 2698 | return false; |
| 2699 | |
| 2700 | Chain = TCChain; |
| 2701 | return true; |
| 2702 | } |
| 2703 | |
| 2704 | bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
| 2705 | if (!Subtarget->supportsTailCall()) |
| 2706 | return false; |
| 2707 | |
| 2708 | auto Attr = |
CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
if (!CI->isTailCall() || Attr.getValueAsString() == "true")
| 2711 | return false; |
| 2712 | |
| 2713 | return true; |
| 2714 | } |
| 2715 | |
// Writing a 64-bit value requires splitting it into two 32-bit values
// first, then passing the low and high parts through separately.
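// Roughly, a call such as
//   call void @llvm.write_register.i64(metadata !{!"reg"}, i64 %v)
// (with "reg" standing in for whatever 64-bit register was named) becomes a
// single WRITE_REGISTER node whose value operands are %v's two i32 halves.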
| 2718 | static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { |
| 2719 | SDLoc DL(Op); |
| 2720 | SDValue WriteValue = Op->getOperand(2); |
| 2721 | |
// This function is only supposed to be called for an i64-typed argument.
assert(WriteValue.getValueType() == MVT::i64
&& "LowerWRITE_REGISTER called for non-i64 type argument.");
| 2725 | |
| 2726 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, |
| 2727 | DAG.getConstant(0, DL, MVT::i32)); |
| 2728 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, |
| 2729 | DAG.getConstant(1, DL, MVT::i32)); |
| 2730 | SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi }; |
| 2731 | return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops); |
| 2732 | } |
| 2733 | |
| 2734 | // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as |
| 2735 | // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is |
// one of the above-mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOVi.
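// Roughly: a GlobalAddress @g becomes ARMISD::Wrapper(TargetGlobalAddress
// @g); the wrapper gives the selector a pattern to match (an immediate for
// MOVi or a constant-pool load), whereas the raw target node would just
// select to itself.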
| 2740 | SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, |
| 2741 | SelectionDAG &DAG) const { |
| 2742 | EVT PtrVT = Op.getValueType(); |
| 2743 | // FIXME there is no actual debug info here |
| 2744 | SDLoc dl(Op); |
| 2745 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); |
| 2746 | SDValue Res; |
| 2747 | |
| 2748 | // When generating execute-only code Constant Pools must be promoted to the |
| 2749 | // global data section. It's a bit ugly that we can't share them across basic |
// blocks, but this way we guarantee that execute-only behaves correctly with
| 2751 | // position-independent addressing modes. |
| 2752 | if (Subtarget->genExecuteOnly()) { |
| 2753 | auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
| 2754 | auto T = const_cast<Type*>(CP->getType()); |
| 2755 | auto C = const_cast<Constant*>(CP->getConstVal()); |
| 2756 | auto M = const_cast<Module*>(DAG.getMachineFunction(). |
| 2757 | getFunction().getParent()); |
| 2758 | auto GV = new GlobalVariable( |
| 2759 | *M, T, /*isConst=*/true, GlobalVariable::InternalLinkage, C, |
| 2760 | Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + |
| 2761 | Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + |
| 2762 | Twine(AFI->createPICLabelUId()) |
| 2763 | ); |
| 2764 | SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV), |
| 2765 | dl, PtrVT); |
| 2766 | return LowerGlobalAddress(GA, DAG); |
| 2767 | } |
| 2768 | |
| 2769 | if (CP->isMachineConstantPoolEntry()) |
| 2770 | Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, |
| 2771 | CP->getAlignment()); |
| 2772 | else |
| 2773 | Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, |
| 2774 | CP->getAlignment()); |
| 2775 | return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); |
| 2776 | } |
| 2777 | |
| 2778 | unsigned ARMTargetLowering::getJumpTableEncoding() const { |
| 2779 | return MachineJumpTableInfo::EK_Inline; |
| 2780 | } |
| 2781 | |
| 2782 | SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, |
| 2783 | SelectionDAG &DAG) const { |
| 2784 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2785 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2786 | unsigned ARMPCLabelIndex = 0; |
| 2787 | SDLoc DL(Op); |
| 2788 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 2789 | const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); |
| 2790 | SDValue CPAddr; |
| 2791 | bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); |
| 2792 | if (!IsPositionIndependent) { |
| 2793 | CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); |
| 2794 | } else { |
| 2795 | unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 2796 | ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2797 | ARMConstantPoolValue *CPV = |
| 2798 | ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, |
| 2799 | ARMCP::CPBlockAddress, PCAdj); |
| 2800 | CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
| 2801 | } |
| 2802 | CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); |
| 2803 | SDValue Result = DAG.getLoad( |
| 2804 | PtrVT, DL, DAG.getEntryNode(), CPAddr, |
| 2805 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 2806 | if (!IsPositionIndependent) |
| 2807 | return Result; |
| 2808 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32); |
| 2809 | return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); |
| 2810 | } |
| 2811 | |
| 2812 | /// Convert a TLS address reference into the correct sequence of loads |
| 2813 | /// and calls to compute the variable's address for Darwin, and return an |
| 2814 | /// SDValue containing the final node. |
| 2815 | |
| 2816 | /// Darwin only has one TLS scheme which must be capable of dealing with the |
| 2817 | /// fully general situation, in the worst case. This means: |
| 2818 | /// + "extern __thread" declaration. |
| 2819 | /// + Defined in a possibly unknown dynamic library. |
| 2820 | /// |
| 2821 | /// The general system is that each __thread variable has a [3 x i32] descriptor |
| 2822 | /// which contains information used by the runtime to calculate the address. The |
| 2823 | /// only part of this the compiler needs to know about is the first word, which |
| 2824 | /// contains a function pointer that must be called with the address of the |
| 2825 | /// entire descriptor in "r0". |
| 2826 | /// |
| 2827 | /// Since this descriptor may be in a different unit, in general access must |
| 2828 | /// proceed along the usual ARM rules. A common sequence to produce is: |
| 2829 | /// |
| 2830 | /// movw rT1, :lower16:_var$non_lazy_ptr |
| 2831 | /// movt rT1, :upper16:_var$non_lazy_ptr |
| 2832 | /// ldr r0, [rT1] |
| 2833 | /// ldr rT2, [r0] |
| 2834 | /// blx rT2 |
| 2835 | /// [...address now in r0...] |
| 2836 | SDValue |
| 2837 | ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, |
| 2838 | SelectionDAG &DAG) const { |
| 2839 | assert(Subtarget->isTargetDarwin() && |
| 2840 | "This function expects a Darwin target" ); |
| 2841 | SDLoc DL(Op); |
| 2842 | |
// First step is to get the address of the actual global symbol. This is where
| 2844 | // the TLS descriptor lives. |
| 2845 | SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); |
| 2846 | |
| 2847 | // The first entry in the descriptor is a function pointer that we must call |
| 2848 | // to obtain the address of the variable. |
| 2849 | SDValue Chain = DAG.getEntryNode(); |
| 2850 | SDValue FuncTLVGet = DAG.getLoad( |
| 2851 | MVT::i32, DL, Chain, DescAddr, |
| 2852 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), |
| 2853 | /* Alignment = */ 4, |
| 2854 | MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | |
| 2855 | MachineMemOperand::MOInvariant); |
| 2856 | Chain = FuncTLVGet.getValue(1); |
| 2857 | |
| 2858 | MachineFunction &F = DAG.getMachineFunction(); |
| 2859 | MachineFrameInfo &MFI = F.getFrameInfo(); |
| 2860 | MFI.setAdjustsStack(true); |
| 2861 | |
| 2862 | // TLS calls preserve all registers except those that absolutely must be |
| 2863 | // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be |
| 2864 | // silly). |
| 2865 | auto TRI = |
| 2866 | getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); |
| 2867 | auto ARI = static_cast<const ARMRegisterInfo *>(TRI); |
| 2868 | const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction()); |
| 2869 | |
| 2870 | // Finally, we can make the call. This is just a degenerate version of a |
// normal ARM call node: r0 takes the address of the descriptor, and
| 2872 | // returns the address of the variable in this thread. |
| 2873 | Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue()); |
| 2874 | Chain = |
| 2875 | DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), |
| 2876 | Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32), |
| 2877 | DAG.getRegisterMask(Mask), Chain.getValue(1)); |
| 2878 | return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1)); |
| 2879 | } |
| 2880 | |
| 2881 | SDValue |
| 2882 | ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, |
| 2883 | SelectionDAG &DAG) const { |
assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
| 2885 | |
| 2886 | SDValue Chain = DAG.getEntryNode(); |
| 2887 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 2888 | SDLoc DL(Op); |
| 2889 | |
| 2890 | // Load the current TEB (thread environment block) |
| 2891 | SDValue Ops[] = {Chain, |
| 2892 | DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32), |
| 2893 | DAG.getConstant(15, DL, MVT::i32), |
| 2894 | DAG.getConstant(0, DL, MVT::i32), |
| 2895 | DAG.getConstant(13, DL, MVT::i32), |
| 2896 | DAG.getConstant(0, DL, MVT::i32), |
| 2897 | DAG.getConstant(2, DL, MVT::i32)}; |
| 2898 | SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, |
| 2899 | DAG.getVTList(MVT::i32, MVT::Other), Ops); |
| 2900 | |
| 2901 | SDValue TEB = CurrentTEB.getValue(0); |
| 2902 | Chain = CurrentTEB.getValue(1); |
| 2903 | |
| 2904 | // Load the ThreadLocalStoragePointer from the TEB |
| 2905 | // A pointer to the TLS array is located at offset 0x2c from the TEB. |
| 2906 | SDValue TLSArray = |
| 2907 | DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL)); |
| 2908 | TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo()); |
| 2909 | |
// The pointer to the thread's TLS data area lives at the TLS index scaled
// by 4, used as an offset into the TLSArray.
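// In pseudocode, the sequence built below is:
//   Slot = TLSArray + _tls_index * 4; TLS = *Slot; Addr = TLS + SECREL(GV)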
| 2912 | |
| 2913 | // Load the TLS index from the C runtime |
| 2914 | SDValue TLSIndex = |
| 2915 | DAG.getTargetExternalSymbol("_tls_index" , PtrVT, ARMII::MO_NO_FLAG); |
| 2916 | TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex); |
| 2917 | TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo()); |
| 2918 | |
| 2919 | SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, |
| 2920 | DAG.getConstant(2, DL, MVT::i32)); |
| 2921 | SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, |
| 2922 | DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), |
| 2923 | MachinePointerInfo()); |
| 2924 | |
| 2925 | // Get the offset of the start of the .tls section (section base) |
| 2926 | const auto *GA = cast<GlobalAddressSDNode>(Op); |
| 2927 | auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL); |
| 2928 | SDValue Offset = DAG.getLoad( |
| 2929 | PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32, |
| 2930 | DAG.getTargetConstantPool(CPV, PtrVT, 4)), |
| 2931 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 2932 | |
| 2933 | return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset); |
| 2934 | } |
| 2935 | |
| 2936 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model |
| 2937 | SDValue |
| 2938 | ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, |
| 2939 | SelectionDAG &DAG) const { |
| 2940 | SDLoc dl(GA); |
| 2941 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 2942 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 2943 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2944 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2945 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2946 | ARMConstantPoolValue *CPV = |
| 2947 | ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, |
| 2948 | ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); |
| 2949 | SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
| 2950 | Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); |
| 2951 | Argument = DAG.getLoad( |
| 2952 | PtrVT, dl, DAG.getEntryNode(), Argument, |
| 2953 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 2954 | SDValue Chain = Argument.getValue(1); |
| 2955 | |
| 2956 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
| 2957 | Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); |
| 2958 | |
| 2959 | // call __tls_get_addr. |
| 2960 | ArgListTy Args; |
| 2961 | ArgListEntry Entry; |
| 2962 | Entry.Node = Argument; |
Entry.Ty = Type::getInt32Ty(*DAG.getContext());
| 2964 | Args.push_back(Entry); |
| 2965 | |
| 2966 | // FIXME: is there useful debug info available here? |
| 2967 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 2968 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( |
| 2969 | CallingConv::C, Type::getInt32Ty(*DAG.getContext()), |
DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
| 2971 | |
| 2972 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 2973 | return CallResult.first; |
| 2974 | } |
| 2975 | |
| 2976 | // Lower ISD::GlobalTLSAddress using the "initial exec" or |
| 2977 | // "local exec" model. |
| 2978 | SDValue |
| 2979 | ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, |
| 2980 | SelectionDAG &DAG, |
| 2981 | TLSModel::Model model) const { |
| 2982 | const GlobalValue *GV = GA->getGlobal(); |
| 2983 | SDLoc dl(GA); |
| 2984 | SDValue Offset; |
| 2985 | SDValue Chain = DAG.getEntryNode(); |
| 2986 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 2987 | // Get the Thread Pointer |
| 2988 | SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); |
| 2989 | |
| 2990 | if (model == TLSModel::InitialExec) { |
| 2991 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2992 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2993 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2994 | // Initial exec model. |
| 2995 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 2996 | ARMConstantPoolValue *CPV = |
| 2997 | ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, |
| 2998 | ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, |
| 2999 | true); |
| 3000 | Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
| 3001 | Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); |
| 3002 | Offset = DAG.getLoad( |
| 3003 | PtrVT, dl, Chain, Offset, |
| 3004 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3005 | Chain = Offset.getValue(1); |
| 3006 | |
| 3007 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
| 3008 | Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); |
| 3009 | |
| 3010 | Offset = DAG.getLoad( |
| 3011 | PtrVT, dl, Chain, Offset, |
| 3012 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3013 | } else { |
| 3014 | // local exec model |
| 3015 | assert(model == TLSModel::LocalExec); |
| 3016 | ARMConstantPoolValue *CPV = |
| 3017 | ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); |
| 3018 | Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
| 3019 | Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); |
| 3020 | Offset = DAG.getLoad( |
| 3021 | PtrVT, dl, Chain, Offset, |
| 3022 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3023 | } |
| 3024 | |
| 3025 | // The address of the thread local variable is the add of the thread |
| 3026 | // pointer with the offset of the variable. |
| 3027 | return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); |
| 3028 | } |
| 3029 | |
| 3030 | SDValue |
| 3031 | ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { |
| 3032 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); |
| 3033 | if (DAG.getTarget().useEmulatedTLS()) |
| 3034 | return LowerToTLSEmulatedModel(GA, DAG); |
| 3035 | |
| 3036 | if (Subtarget->isTargetDarwin()) |
| 3037 | return LowerGlobalTLSAddressDarwin(Op, DAG); |
| 3038 | |
| 3039 | if (Subtarget->isTargetWindows()) |
| 3040 | return LowerGlobalTLSAddressWindows(Op, DAG); |
| 3041 | |
| 3042 | // TODO: implement the "local dynamic" model |
assert(Subtarget->isTargetELF() && "Only ELF implemented here");
| 3044 | TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); |
| 3045 | |
| 3046 | switch (model) { |
| 3047 | case TLSModel::GeneralDynamic: |
| 3048 | case TLSModel::LocalDynamic: |
| 3049 | return LowerToTLSGeneralDynamicModel(GA, DAG); |
| 3050 | case TLSModel::InitialExec: |
| 3051 | case TLSModel::LocalExec: |
| 3052 | return LowerToTLSExecModels(GA, DAG, model); |
| 3053 | } |
| 3054 | llvm_unreachable("bogus TLS model" ); |
| 3055 | } |
| 3056 | |
| 3057 | /// Return true if all users of V are within function F, looking through |
| 3058 | /// ConstantExprs. |
| 3059 | static bool allUsersAreInFunction(const Value *V, const Function *F) { |
| 3060 | SmallVector<const User*,4> Worklist; |
| 3061 | for (auto *U : V->users()) |
| 3062 | Worklist.push_back(U); |
| 3063 | while (!Worklist.empty()) { |
| 3064 | auto *U = Worklist.pop_back_val(); |
| 3065 | if (isa<ConstantExpr>(U)) { |
| 3066 | for (auto *UU : U->users()) |
| 3067 | Worklist.push_back(UU); |
| 3068 | continue; |
| 3069 | } |
| 3070 | |
| 3071 | auto *I = dyn_cast<Instruction>(U); |
| 3072 | if (!I || I->getParent()->getParent() != F) |
| 3073 | return false; |
| 3074 | } |
| 3075 | return true; |
| 3076 | } |
| 3077 | |
| 3078 | static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, |
| 3079 | const GlobalValue *GV, SelectionDAG &DAG, |
| 3080 | EVT PtrVT, const SDLoc &dl) { |
| 3081 | // If we're creating a pool entry for a constant global with unnamed address, |
| 3082 | // and the global is small enough, we can emit it inline into the constant pool |
| 3083 | // to save ourselves an indirection. |
| 3084 | // |
| 3085 | // This is a win if the constant is only used in one function (so it doesn't |
| 3086 | // need to be duplicated) or duplicating the constant wouldn't increase code |
| 3087 | // size (implying the constant is no larger than 4 bytes). |
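// For example, a file-local constant along the lines of
//   static const char Tag[4] = "ok!";
// (constant, internal linkage, unnamed_addr, 4 bytes) can qualify: the pool
// then holds the data itself rather than a pointer to it, saving one load.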
| 3088 | const Function &F = DAG.getMachineFunction().getFunction(); |
| 3089 | |
// We rely on this decision to inline being idempotent and unrelated to the
// use-site. We know that if we inline a variable at one use site, we'll
// inline it elsewhere too (and reuse the constant pool entry). Fast-isel
// doesn't know about this optimization, so bail out if it's enabled;
// otherwise we could decide to inline here (and thus never emit the GV)
// while fast-isel generated code still requires the GV.
| 3096 | if (!EnableConstpoolPromotion || |
| 3097 | DAG.getMachineFunction().getTarget().Options.EnableFastISel) |
| 3098 | return SDValue(); |
| 3099 | |
| 3100 | auto *GVar = dyn_cast<GlobalVariable>(GV); |
| 3101 | if (!GVar || !GVar->hasInitializer() || |
| 3102 | !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() || |
| 3103 | !GVar->hasLocalLinkage()) |
| 3104 | return SDValue(); |
| 3105 | |
| 3106 | // If we inline a value that contains relocations, we move the relocations |
| 3107 | // from .data to .text. This is not allowed in position-independent code. |
| 3108 | auto *Init = GVar->getInitializer(); |
| 3109 | if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) && |
| 3110 | Init->needsRelocation()) |
| 3111 | return SDValue(); |
| 3112 | |
| 3113 | // The constant islands pass can only really deal with alignment requests |
| 3114 | // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote |
| 3115 | // any type wanting greater alignment requirements than 4 bytes. We also |
| 3116 | // can only promote constants that are multiples of 4 bytes in size or |
// are paddable to a multiple of 4. Currently we only try to pad constants
// that are strings, for simplicity.
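// For example, a 6-byte string has Size % 4 == 2, so RequiredPadding == 2
// and two zero bytes bring it to a PaddedSize of 8; a 16-byte constant gets
// RequiredPadding == 4, which below means no padding at all.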
| 3119 | auto *CDAInit = dyn_cast<ConstantDataArray>(Init); |
| 3120 | unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType()); |
| 3121 | unsigned Align = DAG.getDataLayout().getPreferredAlignment(GVar); |
| 3122 | unsigned RequiredPadding = 4 - (Size % 4); |
| 3123 | bool PaddingPossible = |
| 3124 | RequiredPadding == 4 || (CDAInit && CDAInit->isString()); |
| 3125 | if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize || |
| 3126 | Size == 0) |
| 3127 | return SDValue(); |
| 3128 | |
| 3129 | unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding); |
| 3130 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3131 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3132 | |
| 3133 | // We can't bloat the constant pool too much, else the ConstantIslands pass |
| 3134 | // may fail to converge. If we haven't promoted this global yet (it may have |
// multiple uses), and promoting it would increase the constant pool size
// (Size > 4), ensure we have space to do so up to MaxTotal.
| 3137 | if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4) |
| 3138 | if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >= |
| 3139 | ConstpoolPromotionMaxTotal) |
| 3140 | return SDValue(); |
| 3141 | |
| 3142 | // This is only valid if all users are in a single function; we can't clone |
| 3143 | // the constant in general. The LLVM IR unnamed_addr allows merging |
| 3144 | // constants, but not cloning them. |
| 3145 | // |
| 3146 | // We could potentially allow cloning if we could prove all uses of the |
| 3147 | // constant in the current function don't care about the address, like |
| 3148 | // printf format strings. But that isn't implemented for now. |
| 3149 | if (!allUsersAreInFunction(GVar, &F)) |
| 3150 | return SDValue(); |
| 3151 | |
| 3152 | // We're going to inline this global. Pad it out if needed. |
| 3153 | if (RequiredPadding != 4) { |
| 3154 | StringRef S = CDAInit->getAsString(); |
| 3155 | |
| 3156 | SmallVector<uint8_t,16> V(S.size()); |
| 3157 | std::copy(S.bytes_begin(), S.bytes_end(), V.begin()); |
| 3158 | while (RequiredPadding--) |
| 3159 | V.push_back(0); |
| 3160 | Init = ConstantDataArray::get(*DAG.getContext(), V); |
| 3161 | } |
| 3162 | |
| 3163 | auto CPVal = ARMConstantPoolConstant::Create(GVar, Init); |
| 3164 | SDValue CPAddr = |
| 3165 | DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4); |
| 3166 | if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) { |
| 3167 | AFI->markGlobalAsPromotedToConstantPool(GVar); |
| 3168 | AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + |
| 3169 | PaddedSize - 4); |
| 3170 | } |
| 3171 | ++NumConstpoolPromoted; |
| 3172 | return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 3173 | } |
| 3174 | |
| 3175 | bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { |
| 3176 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) |
| 3177 | if (!(GV = GA->getBaseObject())) |
| 3178 | return false; |
| 3179 | if (const auto *V = dyn_cast<GlobalVariable>(GV)) |
| 3180 | return V->isConstant(); |
| 3181 | return isa<Function>(GV); |
| 3182 | } |
| 3183 | |
| 3184 | SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op, |
| 3185 | SelectionDAG &DAG) const { |
| 3186 | switch (Subtarget->getTargetTriple().getObjectFormat()) { |
| 3187 | default: llvm_unreachable("unknown object format" ); |
| 3188 | case Triple::COFF: |
| 3189 | return LowerGlobalAddressWindows(Op, DAG); |
| 3190 | case Triple::ELF: |
| 3191 | return LowerGlobalAddressELF(Op, DAG); |
| 3192 | case Triple::MachO: |
| 3193 | return LowerGlobalAddressDarwin(Op, DAG); |
| 3194 | } |
| 3195 | } |
| 3196 | |
| 3197 | SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, |
| 3198 | SelectionDAG &DAG) const { |
| 3199 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3200 | SDLoc dl(Op); |
| 3201 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); |
| 3202 | const TargetMachine &TM = getTargetMachine(); |
| 3203 | bool IsRO = isReadOnly(GV); |
| 3204 | |
// Try promoteToConstantPool only when not generating an execute-only (XO)
// text section.
| 3206 | if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly()) |
| 3207 | if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl)) |
| 3208 | return V; |
| 3209 | |
| 3210 | if (isPositionIndependent()) { |
| 3211 | bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); |
| 3212 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
| 3213 | UseGOT_PREL ? ARMII::MO_GOT : 0); |
| 3214 | SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); |
| 3215 | if (UseGOT_PREL) |
| 3216 | Result = |
| 3217 | DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, |
| 3218 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
| 3219 | return Result; |
| 3220 | } else if (Subtarget->isROPI() && IsRO) { |
| 3221 | // PC-relative. |
| 3222 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT); |
| 3223 | SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); |
| 3224 | return Result; |
| 3225 | } else if (Subtarget->isRWPI() && !IsRO) { |
| 3226 | // SB-relative. |
| 3227 | SDValue RelAddr; |
| 3228 | if (Subtarget->useMovt()) { |
| 3229 | ++NumMovwMovt; |
| 3230 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL); |
| 3231 | RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G); |
| 3232 | } else { // use literal pool for address constant |
| 3233 | ARMConstantPoolValue *CPV = |
| 3234 | ARMConstantPoolConstant::Create(GV, ARMCP::SBREL); |
| 3235 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
| 3236 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 3237 | RelAddr = DAG.getLoad( |
| 3238 | PtrVT, dl, DAG.getEntryNode(), CPAddr, |
| 3239 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3240 | } |
| 3241 | SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT); |
| 3242 | SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr); |
| 3243 | return Result; |
| 3244 | } |
| 3245 | |
| 3246 | // If we have T2 ops, we can materialize the address directly via movt/movw |
| 3247 | // pair. This is always cheaper. |
| 3248 | if (Subtarget->useMovt()) { |
| 3249 | ++NumMovwMovt; |
| 3250 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3251 | // operands, expand this into two nodes. |
| 3252 | return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, |
| 3253 | DAG.getTargetGlobalAddress(GV, dl, PtrVT)); |
| 3254 | } else { |
| 3255 | SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); |
| 3256 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 3257 | return DAG.getLoad( |
| 3258 | PtrVT, dl, DAG.getEntryNode(), CPAddr, |
| 3259 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3260 | } |
| 3261 | } |
| 3262 | |
| 3263 | SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, |
| 3264 | SelectionDAG &DAG) const { |
| 3265 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 3266 | "ROPI/RWPI not currently supported for Darwin" ); |
| 3267 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3268 | SDLoc dl(Op); |
| 3269 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); |
| 3270 | |
| 3271 | if (Subtarget->useMovt()) |
| 3272 | ++NumMovwMovt; |
| 3273 | |
| 3274 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3275 | // operands, expand this into multiple nodes |
| 3276 | unsigned Wrapper = |
| 3277 | isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; |
| 3278 | |
| 3279 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY); |
| 3280 | SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G); |
| 3281 | |
| 3282 | if (Subtarget->isGVIndirectSymbol(GV)) |
| 3283 | Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, |
| 3284 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
| 3285 | return Result; |
| 3286 | } |
| 3287 | |
| 3288 | SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, |
| 3289 | SelectionDAG &DAG) const { |
assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
assert(Subtarget->useMovt() &&
"Windows on ARM expects to use movw/movt");
assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
"ROPI/RWPI not currently supported for Windows");
| 3295 | |
| 3296 | const TargetMachine &TM = getTargetMachine(); |
| 3297 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); |
| 3298 | ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG; |
| 3299 | if (GV->hasDLLImportStorageClass()) |
| 3300 | TargetFlags = ARMII::MO_DLLIMPORT; |
| 3301 | else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) |
| 3302 | TargetFlags = ARMII::MO_COFFSTUB; |
| 3303 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3304 | SDValue Result; |
| 3305 | SDLoc DL(Op); |
| 3306 | |
| 3307 | ++NumMovwMovt; |
| 3308 | |
| 3309 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3310 | // operands, expand this into two nodes. |
| 3311 | Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, |
| 3312 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0, |
| 3313 | TargetFlags)); |
| 3314 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
| 3315 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, |
| 3316 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
| 3317 | return Result; |
| 3318 | } |
| 3319 | |
| 3320 | SDValue |
| 3321 | ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { |
| 3322 | SDLoc dl(Op); |
| 3323 | SDValue Val = DAG.getConstant(0, dl, MVT::i32); |
| 3324 | return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, |
| 3325 | DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), |
| 3326 | Op.getOperand(1), Val); |
| 3327 | } |
| 3328 | |
| 3329 | SDValue |
| 3330 | ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { |
| 3331 | SDLoc dl(Op); |
| 3332 | return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), |
| 3333 | Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32)); |
| 3334 | } |
| 3335 | |
| 3336 | SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, |
| 3337 | SelectionDAG &DAG) const { |
| 3338 | SDLoc dl(Op); |
| 3339 | return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other, |
| 3340 | Op.getOperand(0)); |
| 3341 | } |
| 3342 | |
| 3343 | SDValue |
| 3344 | ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, |
| 3345 | const ARMSubtarget *Subtarget) const { |
| 3346 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 3347 | SDLoc dl(Op); |
| 3348 | switch (IntNo) { |
| 3349 | default: return SDValue(); // Don't custom lower most intrinsics. |
| 3350 | case Intrinsic::thread_pointer: { |
| 3351 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3352 | return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); |
| 3353 | } |
| 3354 | case Intrinsic::eh_sjlj_lsda: { |
| 3355 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3356 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3357 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3358 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3359 | SDValue CPAddr; |
| 3360 | bool IsPositionIndependent = isPositionIndependent(); |
| 3361 | unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; |
| 3362 | ARMConstantPoolValue *CPV = |
| 3363 | ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex, |
| 3364 | ARMCP::CPLSDA, PCAdj); |
| 3365 | CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); |
| 3366 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 3367 | SDValue Result = DAG.getLoad( |
| 3368 | PtrVT, dl, DAG.getEntryNode(), CPAddr, |
| 3369 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3370 | |
| 3371 | if (IsPositionIndependent) { |
| 3372 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
| 3373 | Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); |
| 3374 | } |
| 3375 | return Result; |
| 3376 | } |
| 3377 | case Intrinsic::arm_neon_vabs: |
| 3378 | return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(), |
| 3379 | Op.getOperand(1)); |
| 3380 | case Intrinsic::arm_neon_vmulls: |
| 3381 | case Intrinsic::arm_neon_vmullu: { |
| 3382 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) |
| 3383 | ? ARMISD::VMULLs : ARMISD::VMULLu; |
| 3384 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3385 | Op.getOperand(1), Op.getOperand(2)); |
| 3386 | } |
| 3387 | case Intrinsic::arm_neon_vminnm: |
| 3388 | case Intrinsic::arm_neon_vmaxnm: { |
| 3389 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) |
| 3390 | ? ISD::FMINNUM : ISD::FMAXNUM; |
| 3391 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3392 | Op.getOperand(1), Op.getOperand(2)); |
| 3393 | } |
| 3394 | case Intrinsic::arm_neon_vminu: |
| 3395 | case Intrinsic::arm_neon_vmaxu: { |
| 3396 | if (Op.getValueType().isFloatingPoint()) |
| 3397 | return SDValue(); |
| 3398 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) |
| 3399 | ? ISD::UMIN : ISD::UMAX; |
| 3400 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3401 | Op.getOperand(1), Op.getOperand(2)); |
| 3402 | } |
| 3403 | case Intrinsic::arm_neon_vmins: |
| 3404 | case Intrinsic::arm_neon_vmaxs: { |
| 3405 | // v{min,max}s is overloaded between signed integers and floats. |
| 3406 | if (!Op.getValueType().isFloatingPoint()) { |
| 3407 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 3408 | ? ISD::SMIN : ISD::SMAX; |
| 3409 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3410 | Op.getOperand(1), Op.getOperand(2)); |
| 3411 | } |
| 3412 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 3413 | ? ISD::FMINIMUM : ISD::FMAXIMUM; |
| 3414 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3415 | Op.getOperand(1), Op.getOperand(2)); |
| 3416 | } |
| 3417 | case Intrinsic::arm_neon_vtbl1: |
| 3418 | return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(), |
| 3419 | Op.getOperand(1), Op.getOperand(2)); |
| 3420 | case Intrinsic::arm_neon_vtbl2: |
| 3421 | return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(), |
| 3422 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
| 3423 | } |
| 3424 | } |
| 3425 | |
| 3426 | static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, |
| 3427 | const ARMSubtarget *Subtarget) { |
| 3428 | SDLoc dl(Op); |
| 3429 | ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2)); |
| 3430 | auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue()); |
| 3431 | if (SSID == SyncScope::SingleThread) |
| 3432 | return Op; |
| 3433 | |
| 3434 | if (!Subtarget->hasDataBarrier()) { |
| 3435 | // Some ARMv6 cpus can support data barriers with an mcr instruction. |
| 3436 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 3437 | // here. |
| 3438 | assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && |
| 3439 | "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!" ); |
| 3440 | return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), |
| 3441 | DAG.getConstant(0, dl, MVT::i32)); |
| 3442 | } |
| 3443 | |
| 3444 | ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1)); |
| 3445 | AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue()); |
| 3446 | ARM_MB::MemBOpt Domain = ARM_MB::ISH; |
| 3447 | if (Subtarget->isMClass()) { |
| 3448 | // Only a full system barrier exists in the M-class architectures. |
| 3449 | Domain = ARM_MB::SY; |
| 3450 | } else if (Subtarget->preferISHSTBarriers() && |
| 3451 | Ord == AtomicOrdering::Release) { |
| 3452 | // Swift happens to implement ISHST barriers in a way that's compatible with |
| 3453 | // Release semantics but weaker than ISH so we'd be fools not to use |
| 3454 | // it. Beware: other processors probably don't! |
| 3455 | Domain = ARM_MB::ISHST; |
| 3456 | } |
| 3457 | |
| 3458 | return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0), |
| 3459 | DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32), |
| 3460 | DAG.getConstant(Domain, dl, MVT::i32)); |
| 3461 | } |
| 3462 | |
| 3463 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, |
| 3464 | const ARMSubtarget *Subtarget) { |
// ARM pre-v5TE and Thumb1 do not have preload instructions.
| 3466 | if (!(Subtarget->isThumb2() || |
| 3467 | (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) |
| 3468 | // Just preserve the chain. |
| 3469 | return Op.getOperand(0); |
| 3470 | |
| 3471 | SDLoc dl(Op); |
| 3472 | unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; |
| 3473 | if (!isRead && |
| 3474 | (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) |
| 3475 | // ARMv7 with MP extension has PLDW. |
| 3476 | return Op.getOperand(0); |
| 3477 | |
| 3478 | unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); |
| 3479 | if (Subtarget->isThumb()) { |
| 3480 | // Invert the bits. |
| 3481 | isRead = ~isRead & 1; |
| 3482 | isData = ~isData & 1; |
| 3483 | } |
| 3484 | |
| 3485 | return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), |
| 3486 | Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32), |
| 3487 | DAG.getConstant(isData, dl, MVT::i32)); |
| 3488 | } |
| 3489 | |
| 3490 | static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { |
| 3491 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3492 | ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
| 3493 | |
| 3494 | // vastart just stores the address of the VarArgsFrameIndex slot into the |
| 3495 | // memory location argument. |
| 3496 | SDLoc dl(Op); |
| 3497 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
| 3498 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
| 3499 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
| 3500 | return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), |
| 3501 | MachinePointerInfo(SV)); |
| 3502 | } |
| 3503 | |
| 3504 | SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, |
| 3505 | CCValAssign &NextVA, |
| 3506 | SDValue &Root, |
| 3507 | SelectionDAG &DAG, |
| 3508 | const SDLoc &dl) const { |
| 3509 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3510 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3511 | |
| 3512 | const TargetRegisterClass *RC; |
| 3513 | if (AFI->isThumb1OnlyFunction()) |
| 3514 | RC = &ARM::tGPRRegClass; |
| 3515 | else |
| 3516 | RC = &ARM::GPRRegClass; |
| 3517 | |
| 3518 | // Transform the arguments stored in physical registers into virtual ones. |
| 3519 | unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); |
| 3520 | SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); |
| 3521 | |
| 3522 | SDValue ArgValue2; |
| 3523 | if (NextVA.isMemLoc()) { |
| 3524 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 3525 | int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true); |
| 3526 | |
| 3527 | // Create load node to retrieve arguments from the stack. |
| 3528 | SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); |
| 3529 | ArgValue2 = DAG.getLoad( |
| 3530 | MVT::i32, dl, Root, FIN, |
| 3531 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); |
| 3532 | } else { |
| 3533 | Reg = MF.addLiveIn(NextVA.getLocReg(), RC); |
| 3534 | ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); |
| 3535 | } |
| 3536 | if (!Subtarget->isLittle()) |
| 3537 | std::swap (ArgValue, ArgValue2); |
| 3538 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); |
| 3539 | } |
| 3540 | |
| 3541 | // The remaining GPRs hold either the beginning of variable-argument |
| 3542 | // data, or the beginning of an aggregate passed by value (usually |
| 3543 | // byval). Either way, we allocate stack slots adjacent to the data |
| 3544 | // provided by our caller, and store the unallocated registers there. |
| 3545 | // If this is a variadic function, the va_list pointer will begin with |
| 3546 | // these values; otherwise, this reassembles a (byval) structure that |
| 3547 | // was split between registers and memory. |
// Return: The frame index the registers were stored into.
| 3549 | int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, |
| 3550 | const SDLoc &dl, SDValue &Chain, |
| 3551 | const Value *OrigArg, |
| 3552 | unsigned InRegsParamRecordIdx, |
| 3553 | int ArgOffset, unsigned ArgSize) const { |
  // Currently, two use cases are possible:
  // Case #1. Non-var-args function, and we meet the first byval parameter.
  //          Setup the first unallocated register as the first byval register;
  //          eat all remaining registers
  //          (these two actions are performed by the HandleByVal method).
  //          Then, here, we initialize the stack frame with
  //          "store-reg" instructions.
  // Case #2. Var-args function that doesn't contain byval parameters.
  //          The same: eat all remaining unallocated registers,
  //          initialize the stack frame.
| 3564 | |
| 3565 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3566 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 3567 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3568 | unsigned RBegin, REnd; |
| 3569 | if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { |
| 3570 | CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd); |
| 3571 | } else { |
| 3572 | unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs); |
| 3573 | RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; |
| 3574 | REnd = ARM::R4; |
| 3575 | } |
| 3576 | |
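  // The offset is relative to the CFA: for example, if RBegin is R2, the two
  // remaining argument registers (R2 and R3) occupy the 8 bytes immediately
  // below it, so ArgOffset becomes -8.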
| 3577 | if (REnd != RBegin) |
| 3578 | ArgOffset = -4 * (ARM::R4 - RBegin); |
| 3579 | |
| 3580 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3581 | int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false); |
| 3582 | SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT); |
| 3583 | |
| 3584 | SmallVector<SDValue, 4> MemOps; |
| 3585 | const TargetRegisterClass *RC = |
| 3586 | AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 3587 | |
| 3588 | for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { |
| 3589 | unsigned VReg = MF.addLiveIn(Reg, RC); |
| 3590 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); |
| 3591 | SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, |
| 3592 | MachinePointerInfo(OrigArg, 4 * i)); |
| 3593 | MemOps.push_back(Store); |
| 3594 | FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT)); |
| 3595 | } |
| 3596 | |
| 3597 | if (!MemOps.empty()) |
| 3598 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); |
| 3599 | return FrameIndex; |
| 3600 | } |
| 3601 | |
// Set up the stack frame that the va_list pointer will start from.
| 3603 | void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, |
| 3604 | const SDLoc &dl, SDValue &Chain, |
| 3605 | unsigned ArgOffset, |
| 3606 | unsigned TotalArgRegsSaveSize, |
| 3607 | bool ForceMutable) const { |
| 3608 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3609 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3610 | |
  // Try to store any remaining integer argument regs
  // to their spots on the stack so that they may be loaded by dereferencing
  // the result of va_next.
  // If there are no regs to be stored, just point the address after the last
  // argument passed via the stack.
| 3616 | int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr, |
| 3617 | CCInfo.getInRegsParamsCount(), |
| 3618 | CCInfo.getNextStackOffset(), 4); |
| 3619 | AFI->setVarArgsFrameIndex(FrameIndex); |
| 3620 | } |
| 3621 | |
| 3622 | SDValue ARMTargetLowering::LowerFormalArguments( |
| 3623 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 3624 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 3625 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
| 3626 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3627 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 3628 | |
| 3629 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3630 | |
| 3631 | // Assign locations to all of the incoming arguments. |
| 3632 | SmallVector<CCValAssign, 16> ArgLocs; |
| 3633 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 3634 | *DAG.getContext()); |
| 3635 | CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg)); |
| 3636 | |
| 3637 | SmallVector<SDValue, 16> ArgValues; |
| 3638 | SDValue ArgValue; |
| 3639 | Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); |
| 3640 | unsigned CurArgIdx = 0; |
| 3641 | |
  // Initially ArgRegsSaveSize is zero.
  // Then we increase this value each time we meet a byval parameter.
  // We also increase this value in case of a varargs function.
| 3645 | AFI->setArgRegsSaveSize(0); |
| 3646 | |
| 3647 | // Calculate the amount of stack space that we need to allocate to store |
| 3648 | // byval and variadic arguments that are passed in registers. |
| 3649 | // We need to know this before we allocate the first byval or variadic |
| 3650 | // argument, as they will be allocated a stack slot below the CFA (Canonical |
| 3651 | // Frame Address, the stack pointer at entry to the function). |
| 3652 | unsigned ArgRegBegin = ARM::R4; |
| 3653 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
| 3654 | if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) |
| 3655 | break; |
| 3656 | |
| 3657 | CCValAssign &VA = ArgLocs[i]; |
| 3658 | unsigned Index = VA.getValNo(); |
| 3659 | ISD::ArgFlagsTy Flags = Ins[Index].Flags; |
| 3660 | if (!Flags.isByVal()) |
| 3661 | continue; |
| 3662 | |
    assert(VA.isMemLoc() && "unexpected byval pointer in reg");
| 3664 | unsigned RBegin, REnd; |
| 3665 | CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd); |
| 3666 | ArgRegBegin = std::min(ArgRegBegin, RBegin); |
| 3667 | |
| 3668 | CCInfo.nextInRegsParam(); |
| 3669 | } |
| 3670 | CCInfo.rewindByValRegsInfo(); |
| 3671 | |
| 3672 | int lastInsIndex = -1; |
| 3673 | if (isVarArg && MFI.hasVAStart()) { |
| 3674 | unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs); |
| 3675 | if (RegIdx != array_lengthof(GPRArgRegs)) |
| 3676 | ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]); |
| 3677 | } |
| 3678 | |
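  // The save area spans every GPR from the lowest register that must be
  // spilled up to (but not including) R4: for example, if ArgRegBegin is R1,
  // then R1-R3 are saved and the area is 12 bytes.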
| 3679 | unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); |
| 3680 | AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); |
| 3681 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3682 | |
| 3683 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
| 3684 | CCValAssign &VA = ArgLocs[i]; |
| 3685 | if (Ins[VA.getValNo()].isOrigArg()) { |
| 3686 | std::advance(CurOrigArg, |
| 3687 | Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); |
| 3688 | CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); |
| 3689 | } |
| 3690 | // Arguments stored in registers. |
| 3691 | if (VA.isRegLoc()) { |
| 3692 | EVT RegVT = VA.getLocVT(); |
| 3693 | |
| 3694 | if (VA.needsCustom()) { |
| 3695 | // f64 and vector types are split up into multiple registers or |
| 3696 | // combinations of registers and stack slots. |
| 3697 | if (VA.getLocVT() == MVT::v2f64) { |
| 3698 | SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], |
| 3699 | Chain, DAG, dl); |
| 3700 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 3701 | SDValue ArgValue2; |
| 3702 | if (VA.isMemLoc()) { |
| 3703 | int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true); |
| 3704 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
| 3705 | ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, |
| 3706 | MachinePointerInfo::getFixedStack( |
| 3707 | DAG.getMachineFunction(), FI)); |
| 3708 | } else { |
| 3709 | ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], |
| 3710 | Chain, DAG, dl); |
| 3711 | } |
| 3712 | ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); |
| 3713 | ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, |
| 3714 | ArgValue, ArgValue1, |
| 3715 | DAG.getIntPtrConstant(0, dl)); |
| 3716 | ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, |
| 3717 | ArgValue, ArgValue2, |
| 3718 | DAG.getIntPtrConstant(1, dl)); |
| 3719 | } else |
| 3720 | ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); |
| 3721 | } else { |
        const TargetRegisterClass *RC;

| 3725 | if (RegVT == MVT::f16) |
| 3726 | RC = &ARM::HPRRegClass; |
| 3727 | else if (RegVT == MVT::f32) |
| 3728 | RC = &ARM::SPRRegClass; |
| 3729 | else if (RegVT == MVT::f64 || RegVT == MVT::v4f16) |
| 3730 | RC = &ARM::DPRRegClass; |
| 3731 | else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16) |
| 3732 | RC = &ARM::QPRRegClass; |
| 3733 | else if (RegVT == MVT::i32) |
| 3734 | RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass |
| 3735 | : &ARM::GPRRegClass; |
| 3736 | else |
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
| 3738 | |
| 3739 | // Transform the arguments in physical registers into virtual ones. |
| 3740 | unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); |
| 3741 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); |
| 3742 | } |
| 3743 | |
| 3744 | // If this is an 8 or 16-bit value, it is really passed promoted |
| 3745 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
| 3746 | // truncate to the right size. |
| 3747 | switch (VA.getLocInfo()) { |
      default: llvm_unreachable("Unknown loc info!");
| 3749 | case CCValAssign::Full: break; |
| 3750 | case CCValAssign::BCvt: |
| 3751 | ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); |
| 3752 | break; |
| 3753 | case CCValAssign::SExt: |
| 3754 | ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, |
| 3755 | DAG.getValueType(VA.getValVT())); |
| 3756 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); |
| 3757 | break; |
| 3758 | case CCValAssign::ZExt: |
| 3759 | ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, |
| 3760 | DAG.getValueType(VA.getValVT())); |
| 3761 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); |
| 3762 | break; |
| 3763 | } |
| 3764 | |
| 3765 | InVals.push_back(ArgValue); |
| 3766 | } else { // VA.isRegLoc() |
| 3767 | // sanity check |
| 3768 | assert(VA.isMemLoc()); |
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
| 3770 | |
| 3771 | int index = VA.getValNo(); |
| 3772 | |
| 3773 | // Some Ins[] entries become multiple ArgLoc[] entries. |
| 3774 | // Process them only once. |
| 3775 | if (index != lastInsIndex) |
| 3776 | { |
| 3777 | ISD::ArgFlagsTy Flags = Ins[index].Flags; |
          // FIXME: For now, all byval parameter objects are marked mutable.
          // This can be changed with more analysis.
          // In case of tail call optimization, mark all arguments mutable,
          // since they could be overwritten by the lowering of arguments in
          // case of a tail call.
| 3783 | if (Flags.isByVal()) { |
| 3784 | assert(Ins[index].isOrigArg() && |
                 "Byval arguments cannot be implicit");
| 3786 | unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); |
| 3787 | |
| 3788 | int FrameIndex = StoreByValRegs( |
| 3789 | CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex, |
| 3790 | VA.getLocMemOffset(), Flags.getByValSize()); |
| 3791 | InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT)); |
| 3792 | CCInfo.nextInRegsParam(); |
| 3793 | } else { |
| 3794 | unsigned FIOffset = VA.getLocMemOffset(); |
| 3795 | int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8, |
| 3796 | FIOffset, true); |
| 3797 | |
| 3798 | // Create load nodes to retrieve arguments from the stack. |
| 3799 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
| 3800 | InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, |
| 3801 | MachinePointerInfo::getFixedStack( |
| 3802 | DAG.getMachineFunction(), FI))); |
| 3803 | } |
| 3804 | lastInsIndex = index; |
| 3805 | } |
| 3806 | } |
| 3807 | } |
| 3808 | |
| 3809 | // varargs |
| 3810 | if (isVarArg && MFI.hasVAStart()) |
| 3811 | VarArgStyleRegisters(CCInfo, DAG, dl, Chain, |
| 3812 | CCInfo.getNextStackOffset(), |
| 3813 | TotalArgRegsSaveSize); |
| 3814 | |
| 3815 | AFI->setArgumentStackSize(CCInfo.getNextStackOffset()); |
| 3816 | |
| 3817 | return Chain; |
| 3818 | } |
| 3819 | |
| 3820 | /// isFloatingPointZero - Return true if this is +0.0. |
| 3821 | static bool isFloatingPointZero(SDValue Op) { |
| 3822 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) |
| 3823 | return CFP->getValueAPF().isPosZero(); |
| 3824 | else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { |
| 3825 | // Maybe this has already been legalized into the constant pool? |
| 3826 | if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { |
| 3827 | SDValue WrapperOp = Op.getOperand(1).getOperand(0); |
| 3828 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) |
| 3829 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) |
| 3830 | return CFP->getValueAPF().isPosZero(); |
| 3831 | } |
| 3832 | } else if (Op->getOpcode() == ISD::BITCAST && |
| 3833 | Op->getValueType(0) == MVT::f64) { |
| 3834 | // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) |
| 3835 | // created by LowerConstantFP(). |
| 3836 | SDValue BitcastOp = Op->getOperand(0); |
| 3837 | if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && |
| 3838 | isNullConstant(BitcastOp->getOperand(0))) |
| 3839 | return true; |
| 3840 | } |
| 3841 | return false; |
| 3842 | } |
| 3843 | |
/// Returns the appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
| 3846 | SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
| 3847 | SDValue &ARMcc, SelectionDAG &DAG, |
| 3848 | const SDLoc &dl) const { |
| 3849 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { |
| 3850 | unsigned C = RHSC->getZExtValue(); |
| 3851 | if (!isLegalICmpImmediate((int32_t)C)) { |
| 3852 | // Constant does not fit, try adjusting it by one. |
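      // For example, (x < 0x1001) cannot be encoded directly, since 0x1001 is
      // not a legal ARM modified immediate, but the equivalent (x <= 0x1000)
      // can be, since 0x1000 is.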
| 3853 | switch (CC) { |
| 3854 | default: break; |
| 3855 | case ISD::SETLT: |
| 3856 | case ISD::SETGE: |
| 3857 | if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { |
| 3858 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; |
| 3859 | RHS = DAG.getConstant(C - 1, dl, MVT::i32); |
| 3860 | } |
| 3861 | break; |
| 3862 | case ISD::SETULT: |
| 3863 | case ISD::SETUGE: |
| 3864 | if (C != 0 && isLegalICmpImmediate(C-1)) { |
| 3865 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; |
| 3866 | RHS = DAG.getConstant(C - 1, dl, MVT::i32); |
| 3867 | } |
| 3868 | break; |
| 3869 | case ISD::SETLE: |
| 3870 | case ISD::SETGT: |
| 3871 | if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { |
| 3872 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; |
| 3873 | RHS = DAG.getConstant(C + 1, dl, MVT::i32); |
| 3874 | } |
| 3875 | break; |
| 3876 | case ISD::SETULE: |
| 3877 | case ISD::SETUGT: |
| 3878 | if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { |
| 3879 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; |
| 3880 | RHS = DAG.getConstant(C + 1, dl, MVT::i32); |
| 3881 | } |
| 3882 | break; |
| 3883 | } |
| 3884 | } |
| 3885 | } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) && |
| 3886 | (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) { |
| 3887 | // In ARM and Thumb-2, the compare instructions can shift their second |
| 3888 | // operand. |
| 3889 | CC = ISD::getSetCCSwappedOperands(CC); |
| 3890 | std::swap(LHS, RHS); |
| 3891 | } |
| 3892 | |
| 3893 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 3894 | ARMISD::NodeType CompareType; |
| 3895 | switch (CondCode) { |
| 3896 | default: |
| 3897 | CompareType = ARMISD::CMP; |
| 3898 | break; |
| 3899 | case ARMCC::EQ: |
| 3900 | case ARMCC::NE: |
| 3901 | // Uses only Z Flag |
| 3902 | CompareType = ARMISD::CMPZ; |
| 3903 | break; |
| 3904 | } |
| 3905 | ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
| 3906 | return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); |
| 3907 | } |
| 3908 | |
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
| 3910 | SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, |
| 3911 | SelectionDAG &DAG, const SDLoc &dl, |
| 3912 | bool InvalidOnQNaN) const { |
| 3913 | assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64); |
| 3914 | SDValue Cmp; |
| 3915 | SDValue C = DAG.getConstant(InvalidOnQNaN, dl, MVT::i32); |
| 3916 | if (!isFloatingPointZero(RHS)) |
| 3917 | Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS, C); |
| 3918 | else |
| 3919 | Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS, C); |
| 3920 | return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); |
| 3921 | } |
| 3922 | |
| 3923 | /// duplicateCmp - Glue values can have only one use, so this function |
| 3924 | /// duplicates a comparison node. |
| 3925 | SDValue |
| 3926 | ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { |
| 3927 | unsigned Opc = Cmp.getOpcode(); |
| 3928 | SDLoc DL(Cmp); |
| 3929 | if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) |
| 3930 | return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); |
| 3931 | |
  assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
| 3933 | Cmp = Cmp.getOperand(0); |
| 3934 | Opc = Cmp.getOpcode(); |
| 3935 | if (Opc == ARMISD::CMPFP) |
| 3936 | Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), |
| 3937 | Cmp.getOperand(1), Cmp.getOperand(2)); |
| 3938 | else { |
    assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
| 3940 | Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), |
| 3941 | Cmp.getOperand(1)); |
| 3942 | } |
| 3943 | return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); |
| 3944 | } |
| 3945 | |
| 3946 | // This function returns three things: the arithmetic computation itself |
| 3947 | // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The |
| 3948 | // comparison and the condition code define the case in which the arithmetic |
| 3949 | // computation *does not* overflow. |
| 3950 | std::pair<SDValue, SDValue> |
| 3951 | ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, |
| 3952 | SDValue &ARMcc) const { |
  assert(Op.getValueType() == MVT::i32 && "Unsupported value type");
| 3954 | |
| 3955 | SDValue Value, OverflowCmp; |
| 3956 | SDValue LHS = Op.getOperand(0); |
| 3957 | SDValue RHS = Op.getOperand(1); |
| 3958 | SDLoc dl(Op); |
| 3959 | |
| 3960 | // FIXME: We are currently always generating CMPs because we don't support |
| 3961 | // generating CMN through the backend. This is not as good as the natural |
| 3962 | // CMP case because it causes a register dependency and cannot be folded |
| 3963 | // later. |
| 3964 | |
| 3965 | switch (Op.getOpcode()) { |
| 3966 | default: |
    llvm_unreachable("Unknown overflow instruction!");
| 3968 | case ISD::SADDO: |
| 3969 | ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); |
| 3970 | Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); |
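    // Comparing the sum back against LHS recomputes RHS, and the V flag of
    // that subtraction is set exactly when the signed addition overflowed.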
| 3971 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); |
| 3972 | break; |
| 3973 | case ISD::UADDO: |
| 3974 | ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); |
| 3975 | // We use ADDC here to correspond to its use in LowerUnsignedALUO. |
| 3976 | // We do not use it in the USUBO case as Value may not be used. |
| 3977 | Value = DAG.getNode(ARMISD::ADDC, dl, |
| 3978 | DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS) |
| 3979 | .getValue(0); |
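    // An unsigned addition wrapped exactly when the sum is below either
    // operand, so HS ("unsigned higher or same") signals no overflow.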
| 3980 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); |
| 3981 | break; |
| 3982 | case ISD::SSUBO: |
| 3983 | ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); |
| 3984 | Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); |
| 3985 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); |
| 3986 | break; |
| 3987 | case ISD::USUBO: |
| 3988 | ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); |
| 3989 | Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); |
| 3990 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); |
| 3991 | break; |
| 3992 | case ISD::UMULO: |
| 3993 | // We generate a UMUL_LOHI and then check if the high word is 0. |
| 3994 | ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); |
| 3995 | Value = DAG.getNode(ISD::UMUL_LOHI, dl, |
| 3996 | DAG.getVTList(Op.getValueType(), Op.getValueType()), |
| 3997 | LHS, RHS); |
| 3998 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), |
| 3999 | DAG.getConstant(0, dl, MVT::i32)); |
| 4000 | Value = Value.getValue(0); // We only want the low 32 bits for the result. |
| 4001 | break; |
| 4002 | case ISD::SMULO: |
| 4003 | // We generate a SMUL_LOHI and then check if all the bits of the high word |
| 4004 | // are the same as the sign bit of the low word. |
| 4005 | ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); |
| 4006 | Value = DAG.getNode(ISD::SMUL_LOHI, dl, |
| 4007 | DAG.getVTList(Op.getValueType(), Op.getValueType()), |
| 4008 | LHS, RHS); |
| 4009 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), |
| 4010 | DAG.getNode(ISD::SRA, dl, Op.getValueType(), |
| 4011 | Value.getValue(0), |
| 4012 | DAG.getConstant(31, dl, MVT::i32))); |
| 4013 | Value = Value.getValue(0); // We only want the low 32 bits for the result. |
| 4014 | break; |
| 4015 | } // switch (...) |
| 4016 | |
| 4017 | return std::make_pair(Value, OverflowCmp); |
| 4018 | } |
| 4019 | |
| 4020 | SDValue |
| 4021 | ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { |
| 4022 | // Let legalize expand this if it isn't a legal type yet. |
| 4023 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) |
| 4024 | return SDValue(); |
| 4025 | |
| 4026 | SDValue Value, OverflowCmp; |
| 4027 | SDValue ARMcc; |
| 4028 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); |
| 4029 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4030 | SDLoc dl(Op); |
| 4031 | // We use 0 and 1 as false and true values. |
| 4032 | SDValue TVal = DAG.getConstant(1, dl, MVT::i32); |
| 4033 | SDValue FVal = DAG.getConstant(0, dl, MVT::i32); |
| 4034 | EVT VT = Op.getValueType(); |
| 4035 | |
| 4036 | SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal, |
| 4037 | ARMcc, CCR, OverflowCmp); |
| 4038 | |
| 4039 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); |
| 4040 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); |
| 4041 | } |
| 4042 | |
| 4043 | static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, |
| 4044 | SelectionDAG &DAG) { |
| 4045 | SDLoc DL(BoolCarry); |
| 4046 | EVT CarryVT = BoolCarry.getValueType(); |
| 4047 | |
| 4048 | // This converts the boolean value carry into the carry flag by doing |
| 4049 | // ARMISD::SUBC Carry, 1 |
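  // If the boolean carry is 1, computing 1 - 1 produces no borrow, and the
  // ARM carry flag (the inverse of borrow) ends up set; if it is 0, 0 - 1
  // borrows and the flag ends up clear.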
| 4050 | SDValue Carry = DAG.getNode(ARMISD::SUBC, DL, |
| 4051 | DAG.getVTList(CarryVT, MVT::i32), |
| 4052 | BoolCarry, DAG.getConstant(1, DL, CarryVT)); |
| 4053 | return Carry.getValue(1); |
| 4054 | } |
| 4055 | |
| 4056 | static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, |
| 4057 | SelectionDAG &DAG) { |
| 4058 | SDLoc DL(Flags); |
| 4059 | |
  // Now convert the carry flag into a boolean carry. We do this
  // using ARMISD::ADDE 0, 0, Carry,
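  // which computes 0 + 0 + C and thereby materializes the flag as a 0/1
  // value.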
| 4062 | return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32), |
| 4063 | DAG.getConstant(0, DL, MVT::i32), |
| 4064 | DAG.getConstant(0, DL, MVT::i32), Flags); |
| 4065 | } |
| 4066 | |
| 4067 | SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op, |
| 4068 | SelectionDAG &DAG) const { |
| 4069 | // Let legalize expand this if it isn't a legal type yet. |
| 4070 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) |
| 4071 | return SDValue(); |
| 4072 | |
| 4073 | SDValue LHS = Op.getOperand(0); |
| 4074 | SDValue RHS = Op.getOperand(1); |
| 4075 | SDLoc dl(Op); |
| 4076 | |
| 4077 | EVT VT = Op.getValueType(); |
| 4078 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
| 4079 | SDValue Value; |
| 4080 | SDValue Overflow; |
| 4081 | switch (Op.getOpcode()) { |
| 4082 | default: |
    llvm_unreachable("Unknown overflow instruction!");
| 4084 | case ISD::UADDO: |
| 4085 | Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS); |
| 4086 | // Convert the carry flag into a boolean value. |
| 4087 | Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); |
| 4088 | break; |
| 4089 | case ISD::USUBO: { |
| 4090 | Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS); |
| 4091 | // Convert the carry flag into a boolean value. |
| 4092 | Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); |
| 4093 | // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow |
| 4094 | // value. So compute 1 - C. |
| 4095 | Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 4096 | DAG.getConstant(1, dl, MVT::i32), Overflow); |
| 4097 | break; |
| 4098 | } |
| 4099 | } |
| 4100 | |
| 4101 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); |
| 4102 | } |
| 4103 | |
| 4104 | SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
| 4105 | SDValue Cond = Op.getOperand(0); |
| 4106 | SDValue SelectTrue = Op.getOperand(1); |
| 4107 | SDValue SelectFalse = Op.getOperand(2); |
| 4108 | SDLoc dl(Op); |
| 4109 | unsigned Opc = Cond.getOpcode(); |
| 4110 | |
| 4111 | if (Cond.getResNo() == 1 && |
| 4112 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 4113 | Opc == ISD::USUBO)) { |
| 4114 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) |
| 4115 | return SDValue(); |
| 4116 | |
| 4117 | SDValue Value, OverflowCmp; |
| 4118 | SDValue ARMcc; |
| 4119 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); |
| 4120 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4121 | EVT VT = Op.getValueType(); |
| 4122 | |
| 4123 | return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR, |
| 4124 | OverflowCmp, DAG); |
| 4125 | } |
| 4126 | |
| 4127 | // Convert: |
| 4128 | // |
| 4129 | // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) |
| 4130 | // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) |
| 4131 | // |
| 4132 | if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { |
| 4133 | const ConstantSDNode *CMOVTrue = |
| 4134 | dyn_cast<ConstantSDNode>(Cond.getOperand(0)); |
| 4135 | const ConstantSDNode *CMOVFalse = |
| 4136 | dyn_cast<ConstantSDNode>(Cond.getOperand(1)); |
| 4137 | |
| 4138 | if (CMOVTrue && CMOVFalse) { |
| 4139 | unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); |
| 4140 | unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); |
| 4141 | |
| 4142 | SDValue True; |
| 4143 | SDValue False; |
| 4144 | if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { |
| 4145 | True = SelectTrue; |
| 4146 | False = SelectFalse; |
| 4147 | } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { |
| 4148 | True = SelectFalse; |
| 4149 | False = SelectTrue; |
| 4150 | } |
| 4151 | |
| 4152 | if (True.getNode() && False.getNode()) { |
| 4153 | EVT VT = Op.getValueType(); |
| 4154 | SDValue ARMcc = Cond.getOperand(2); |
| 4155 | SDValue CCR = Cond.getOperand(3); |
| 4156 | SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); |
| 4157 | assert(True.getValueType() == VT); |
| 4158 | return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG); |
| 4159 | } |
| 4160 | } |
| 4161 | } |
| 4162 | |
| 4163 | // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the |
| 4164 | // undefined bits before doing a full-word comparison with zero. |
| 4165 | Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, |
| 4166 | DAG.getConstant(1, dl, Cond.getValueType())); |
| 4167 | |
| 4168 | return DAG.getSelectCC(dl, Cond, |
| 4169 | DAG.getConstant(0, dl, Cond.getValueType()), |
| 4170 | SelectTrue, SelectFalse, ISD::SETNE); |
| 4171 | } |
| 4172 | |
| 4173 | static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 4174 | bool &swpCmpOps, bool &swpVselOps) { |
| 4175 | // Start by selecting the GE condition code for opcodes that return true for |
| 4176 | // 'equality' |
| 4177 | if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || |
| 4178 | CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE) |
| 4179 | CondCode = ARMCC::GE; |
| 4180 | |
| 4181 | // and GT for opcodes that return false for 'equality'. |
| 4182 | else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || |
| 4183 | CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT) |
| 4184 | CondCode = ARMCC::GT; |
| 4185 | |
| 4186 | // Since we are constrained to GE/GT, if the opcode contains 'less', we need |
| 4187 | // to swap the compare operands. |
| 4188 | if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || |
| 4189 | CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT) |
| 4190 | swpCmpOps = true; |
| 4191 | |
| 4192 | // Both GT and GE are ordered comparisons, and return false for 'unordered'. |
| 4193 | // If we have an unordered opcode, we need to swap the operands to the VSEL |
| 4194 | // instruction (effectively negating the condition). |
| 4195 | // |
| 4196 | // This also has the effect of swapping which one of 'less' or 'greater' |
| 4197 | // returns true, so we also swap the compare operands. It also switches |
| 4198 | // whether we return true for 'equality', so we compensate by picking the |
| 4199 | // opposite condition code to our original choice. |
| 4200 | if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || |
| 4201 | CC == ISD::SETUGT) { |
| 4202 | swpCmpOps = !swpCmpOps; |
| 4203 | swpVselOps = !swpVselOps; |
| 4204 | CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; |
| 4205 | } |
| 4206 | |
| 4207 | // 'ordered' is 'anything but unordered', so use the VS condition code and |
| 4208 | // swap the VSEL operands. |
| 4209 | if (CC == ISD::SETO) { |
| 4210 | CondCode = ARMCC::VS; |
| 4211 | swpVselOps = true; |
| 4212 | } |
| 4213 | |
| 4214 | // 'unordered or not equal' is 'anything but equal', so use the EQ condition |
| 4215 | // code and swap the VSEL operands. Also do this if we don't care about the |
| 4216 | // unordered case. |
| 4217 | if (CC == ISD::SETUNE || CC == ISD::SETNE) { |
| 4218 | CondCode = ARMCC::EQ; |
| 4219 | swpVselOps = true; |
| 4220 | } |
| 4221 | } |
| 4222 | |
| 4223 | SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, |
| 4224 | SDValue TrueVal, SDValue ARMcc, SDValue CCR, |
| 4225 | SDValue Cmp, SelectionDAG &DAG) const { |
| 4226 | if (!Subtarget->hasFP64() && VT == MVT::f64) { |
| 4227 | FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 4228 | DAG.getVTList(MVT::i32, MVT::i32), FalseVal); |
| 4229 | TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 4230 | DAG.getVTList(MVT::i32, MVT::i32), TrueVal); |
| 4231 | |
| 4232 | SDValue TrueLow = TrueVal.getValue(0); |
| 4233 | SDValue TrueHigh = TrueVal.getValue(1); |
| 4234 | SDValue FalseLow = FalseVal.getValue(0); |
| 4235 | SDValue FalseHigh = FalseVal.getValue(1); |
| 4236 | |
| 4237 | SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow, |
| 4238 | ARMcc, CCR, Cmp); |
| 4239 | SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh, |
| 4240 | ARMcc, CCR, duplicateCmp(Cmp, DAG)); |
| 4241 | |
| 4242 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High); |
| 4243 | } else { |
| 4244 | return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, |
| 4245 | Cmp); |
| 4246 | } |
| 4247 | } |
| 4248 | |
| 4249 | static bool isGTorGE(ISD::CondCode CC) { |
| 4250 | return CC == ISD::SETGT || CC == ISD::SETGE; |
| 4251 | } |
| 4252 | |
| 4253 | static bool isLTorLE(ISD::CondCode CC) { |
| 4254 | return CC == ISD::SETLT || CC == ISD::SETLE; |
| 4255 | } |
| 4256 | |
| 4257 | // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. |
| 4258 | // All of these conditions (and their <= and >= counterparts) will do: |
| 4259 | // x < k ? k : x |
| 4260 | // x > k ? x : k |
| 4261 | // k < x ? x : k |
| 4262 | // k > x ? k : x |
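// For example, with k = 0, "x > 0 ? x : 0" clamps x from below at zero.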
| 4263 | static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, |
| 4264 | const SDValue TrueVal, const SDValue FalseVal, |
| 4265 | const ISD::CondCode CC, const SDValue K) { |
| 4266 | return (isGTorGE(CC) && |
| 4267 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || |
| 4268 | (isLTorLE(CC) && |
| 4269 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); |
| 4270 | } |
| 4271 | |
| 4272 | // Similar to isLowerSaturate(), but checks for upper-saturating conditions. |
| 4273 | static bool isUpperSaturate(const SDValue LHS, const SDValue RHS, |
| 4274 | const SDValue TrueVal, const SDValue FalseVal, |
| 4275 | const ISD::CondCode CC, const SDValue K) { |
| 4276 | return (isGTorGE(CC) && |
| 4277 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) || |
| 4278 | (isLTorLE(CC) && |
| 4279 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))); |
| 4280 | } |
| 4281 | |
| 4282 | // Check if two chained conditionals could be converted into SSAT or USAT. |
| 4283 | // |
// SSAT can replace a set of two conditional selectors that bound a number to
// an interval of type [~k, k] when k + 1 is a power of 2. Here are some
// examples:
| 4286 | // |
| 4287 | // x < -k ? -k : (x > k ? k : x) |
| 4288 | // x < -k ? -k : (x < k ? x : k) |
| 4289 | // x > -k ? (x > k ? k : x) : -k |
| 4290 | // x < k ? (x < -k ? -k : x) : k |
| 4291 | // etc. |
| 4292 | // |
// USAT works similarly to SSAT but bounds on the interval [0, k] where k + 1
// is a power of 2.
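// For example, "x < 0 ? 0 : (x > 255 ? 255 : x)" clamps x to [0, 255] and can
// become a USAT with a saturation bit-width of 8.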
| 4295 | // |
| 4296 | // It returns true if the conversion can be done, false otherwise. |
| 4297 | // Additionally, the variable is returned in parameter V, the constant in K and |
// usat is set to true if the conditional represents an unsigned saturation.
| 4299 | static bool isSaturatingConditional(const SDValue &Op, SDValue &V, |
| 4300 | uint64_t &K, bool &usat) { |
| 4301 | SDValue LHS1 = Op.getOperand(0); |
| 4302 | SDValue RHS1 = Op.getOperand(1); |
| 4303 | SDValue TrueVal1 = Op.getOperand(2); |
| 4304 | SDValue FalseVal1 = Op.getOperand(3); |
| 4305 | ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
| 4306 | |
| 4307 | const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1; |
| 4308 | if (Op2.getOpcode() != ISD::SELECT_CC) |
| 4309 | return false; |
| 4310 | |
| 4311 | SDValue LHS2 = Op2.getOperand(0); |
| 4312 | SDValue RHS2 = Op2.getOperand(1); |
| 4313 | SDValue TrueVal2 = Op2.getOperand(2); |
| 4314 | SDValue FalseVal2 = Op2.getOperand(3); |
| 4315 | ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get(); |
| 4316 | |
| 4317 | // Find out which are the constants and which are the variables |
| 4318 | // in each conditional |
| 4319 | SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1) |
| 4320 | ? &RHS1 |
| 4321 | : nullptr; |
| 4322 | SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2) |
| 4323 | ? &RHS2 |
| 4324 | : nullptr; |
| 4325 | SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2; |
| 4326 | SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1; |
| 4327 | SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2; |
| 4328 | SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2; |
| 4329 | |
  // We must detect cases where the original operations worked with 16- or
  // 8-bit values. In such cases, V2Tmp != V2 because the comparison
  // operations must work with sign-extended values but the select operations
  // return the original non-extended value.
| 4334 | SDValue V2TmpReg = V2Tmp; |
| 4335 | if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG) |
| 4336 | V2TmpReg = V2Tmp->getOperand(0); |
| 4337 | |
| 4338 | // Check that the registers and the constants have the correct values |
| 4339 | // in both conditionals |
| 4340 | if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp || |
| 4341 | V2TmpReg != V2) |
| 4342 | return false; |
| 4343 | |
| 4344 | // Figure out which conditional is saturating the lower/upper bound. |
| 4345 | const SDValue *LowerCheckOp = |
| 4346 | isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1) |
| 4347 | ? &Op |
| 4348 | : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) |
| 4349 | ? &Op2 |
| 4350 | : nullptr; |
| 4351 | const SDValue *UpperCheckOp = |
| 4352 | isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1) |
| 4353 | ? &Op |
| 4354 | : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) |
| 4355 | ? &Op2 |
| 4356 | : nullptr; |
| 4357 | |
| 4358 | if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp) |
| 4359 | return false; |
| 4360 | |
| 4361 | // Check that the constant in the lower-bound check is |
| 4362 | // the opposite of the constant in the upper-bound check |
| 4363 | // in 1's complement. |
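  // For example, 127 (0x7f) and -128 (~0x7f) bound the range of a signed
  // 8-bit integer.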
| 4364 | int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue(); |
| 4365 | int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue(); |
| 4366 | int64_t PosVal = std::max(Val1, Val2); |
| 4367 | int64_t NegVal = std::min(Val1, Val2); |
| 4368 | |
| 4369 | if (((Val1 > Val2 && UpperCheckOp == &Op) || |
| 4370 | (Val1 < Val2 && UpperCheckOp == &Op2)) && |
| 4371 | isPowerOf2_64(PosVal + 1)) { |
| 4372 | |
| 4373 | // Handle the difference between USAT (unsigned) and SSAT (signed) saturation |
| 4374 | if (Val1 == ~Val2) |
| 4375 | usat = false; |
| 4376 | else if (NegVal == 0) |
| 4377 | usat = true; |
| 4378 | else |
| 4379 | return false; |
| 4380 | |
| 4381 | V = V2; |
| 4382 | K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive |
| 4383 | |
| 4384 | return true; |
| 4385 | } |
| 4386 | |
| 4387 | return false; |
| 4388 | } |
| 4389 | |
| 4390 | // Check if a condition of the type x < k ? k : x can be converted into a |
| 4391 | // bit operation instead of conditional moves. |
| 4392 | // Currently this is allowed given: |
| 4393 | // - The conditions and values match up |
| 4394 | // - k is 0 or -1 (all ones) |
// This function will not check the last condition; that's up to the caller.
// It returns true if the transformation can be made, and in such a case
// returns x in V, and k in SatK.
| 4398 | static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, |
                                         SDValue &SatK) {
| 4401 | SDValue LHS = Op.getOperand(0); |
| 4402 | SDValue RHS = Op.getOperand(1); |
| 4403 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
| 4404 | SDValue TrueVal = Op.getOperand(2); |
| 4405 | SDValue FalseVal = Op.getOperand(3); |
| 4406 | |
| 4407 | SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS) |
| 4408 | ? &RHS |
| 4409 | : nullptr; |
| 4410 | |
  // No constant operand in the comparison, early out
| 4412 | if (!K) |
| 4413 | return false; |
| 4414 | |
| 4415 | SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal; |
| 4416 | V = (KTmp == TrueVal) ? FalseVal : TrueVal; |
| 4417 | SDValue VTmp = (K && *K == LHS) ? RHS : LHS; |
| 4418 | |
  // If the constant in the comparison does not match the constant in the
  // select, or the variables do not match, early out
| 4421 | if (*K != KTmp || V != VTmp) |
| 4422 | return false; |
| 4423 | |
| 4424 | if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) { |
| 4425 | SatK = *K; |
| 4426 | return true; |
| 4427 | } |
| 4428 | |
| 4429 | return false; |
| 4430 | } |
| 4431 | |
| 4432 | SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { |
| 4433 | EVT VT = Op.getValueType(); |
| 4434 | SDLoc dl(Op); |
| 4435 | |
| 4436 | // Try to convert two saturating conditional selects into a single SSAT |
| 4437 | SDValue SatValue; |
| 4438 | uint64_t SatConstant; |
| 4439 | bool SatUSat; |
  if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) ||
       Subtarget->isThumb2()) &&
| 4441 | isSaturatingConditional(Op, SatValue, SatConstant, SatUSat)) { |
| 4442 | if (SatUSat) |
| 4443 | return DAG.getNode(ARMISD::USAT, dl, VT, SatValue, |
| 4444 | DAG.getConstant(countTrailingOnes(SatConstant), dl, VT)); |
| 4445 | else |
| 4446 | return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue, |
| 4447 | DAG.getConstant(countTrailingOnes(SatConstant), dl, VT)); |
| 4448 | } |
| 4449 | |
  // Try to convert expressions of the form x < k ? k : x (and similar forms)
  // into more efficient bit operations, which is possible when k is 0 or -1.
  // On ARM and Thumb-2, which have a flexible second operand, this will
  // result in single instructions. On Thumb the shift and the bit operation
  // will be two instructions.
  // Only allow this transformation on full-width (32-bit) operations.
| 4456 | SDValue LowerSatConstant; |
| 4457 | if (VT == MVT::i32 && |
| 4458 | isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) { |
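    // An arithmetic shift right by 31 replicates the sign bit, yielding all
    // ones for negative values of SatValue and zero otherwise.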
| 4459 | SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue, |
| 4460 | DAG.getConstant(31, dl, VT)); |
| 4461 | if (isNullConstant(LowerSatConstant)) { |
| 4462 | SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV, |
| 4463 | DAG.getAllOnesConstant(dl, VT)); |
| 4464 | return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV); |
| 4465 | } else if (isAllOnesConstant(LowerSatConstant)) |
| 4466 | return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV); |
| 4467 | } |
| 4468 | |
| 4469 | SDValue LHS = Op.getOperand(0); |
| 4470 | SDValue RHS = Op.getOperand(1); |
| 4471 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
| 4472 | SDValue TrueVal = Op.getOperand(2); |
| 4473 | SDValue FalseVal = Op.getOperand(3); |
| 4474 | |
| 4475 | if (!Subtarget->hasFP64() && LHS.getValueType() == MVT::f64) { |
| 4476 | DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, |
| 4477 | dl); |
| 4478 | |
| 4479 | // If softenSetCCOperands only returned one value, we should compare it to |
| 4480 | // zero. |
| 4481 | if (!RHS.getNode()) { |
| 4482 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); |
| 4483 | CC = ISD::SETNE; |
| 4484 | } |
| 4485 | } |
| 4486 | |
| 4487 | if (LHS.getValueType() == MVT::i32) { |
| 4488 | // Try to generate VSEL on ARMv8. |
| 4489 | // The VSEL instruction can't use all the usual ARM condition |
| 4490 | // codes: it only has two bits to select the condition code, so it's |
| 4491 | // constrained to use only GE, GT, VS and EQ. |
| 4492 | // |
| 4493 | // To implement all the various ISD::SETXXX opcodes, we sometimes need to |
| 4494 | // swap the operands of the previous compare instruction (effectively |
| 4495 | // inverting the compare condition, swapping 'less' and 'greater') and |
| 4496 | // sometimes need to swap the operands to the VSEL (which inverts the |
| 4497 | // condition in the sense of firing whenever the previous condition didn't) |
| 4498 | if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 || |
| 4499 | TrueVal.getValueType() == MVT::f32 || |
| 4500 | TrueVal.getValueType() == MVT::f64)) { |
| 4501 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 4502 | if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || |
| 4503 | CondCode == ARMCC::VC || CondCode == ARMCC::NE) { |
| 4504 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); |
| 4505 | std::swap(TrueVal, FalseVal); |
| 4506 | } |
| 4507 | } |
| 4508 | |
| 4509 | SDValue ARMcc; |
| 4510 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4511 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 4512 | return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); |
| 4513 | } |
| 4514 | |
| 4515 | ARMCC::CondCodes CondCode, CondCode2; |
| 4516 | bool InvalidOnQNaN; |
| 4517 | FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN); |
| 4518 | |
| 4519 | // Normalize the fp compare. If RHS is zero we prefer to keep it there so we |
| 4520 | // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we |
| 4521 | // must use VSEL (limited condition codes), due to not having conditional f16 |
| 4522 | // moves. |
| 4523 | if (Subtarget->hasFPARMv8Base() && |
| 4524 | !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) && |
| 4525 | (TrueVal.getValueType() == MVT::f16 || |
| 4526 | TrueVal.getValueType() == MVT::f32 || |
| 4527 | TrueVal.getValueType() == MVT::f64)) { |
| 4528 | bool swpCmpOps = false; |
| 4529 | bool swpVselOps = false; |
| 4530 | checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); |
| 4531 | |
| 4532 | if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || |
| 4533 | CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { |
| 4534 | if (swpCmpOps) |
| 4535 | std::swap(LHS, RHS); |
| 4536 | if (swpVselOps) |
| 4537 | std::swap(TrueVal, FalseVal); |
| 4538 | } |
| 4539 | } |
| 4540 | |
| 4541 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
| 4542 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); |
| 4543 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4544 | SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); |
| 4545 | if (CondCode2 != ARMCC::AL) { |
| 4546 | SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); |
| 4547 | // FIXME: Needs another CMP because flag can have but one use. |
| 4548 | SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); |
| 4549 | Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); |
| 4550 | } |
| 4551 | return Result; |
| 4552 | } |
| 4553 | |
| 4554 | /// canChangeToInt - Given the fp compare operand, return true if it is suitable |
| 4555 | /// to morph to an integer compare sequence. |
| 4556 | static bool canChangeToInt(SDValue Op, bool &SeenZero, |
| 4557 | const ARMSubtarget *Subtarget) { |
| 4558 | SDNode *N = Op.getNode(); |
| 4559 | if (!N->hasOneUse()) |
| 4560 | // Otherwise it requires moving the value from fp to integer registers. |
| 4561 | return false; |
| 4562 | if (!N->getNumValues()) |
| 4563 | return false; |
| 4564 | EVT VT = Op.getValueType(); |
| 4565 | if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) |
    // The f32 case is generally profitable. The f64 case only makes sense
    // when vcmpe + vmrs are very slow, e.g. on Cortex-A8.
| 4568 | return false; |
| 4569 | |
| 4570 | if (isFloatingPointZero(Op)) { |
| 4571 | SeenZero = true; |
| 4572 | return true; |
| 4573 | } |
| 4574 | return ISD::isNormalLoad(N); |
| 4575 | } |
| 4576 | |
| 4577 | static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { |
| 4578 | if (isFloatingPointZero(Op)) |
| 4579 | return DAG.getConstant(0, SDLoc(Op), MVT::i32); |
| 4580 | |
| 4581 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) |
| 4582 | return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(), |
| 4583 | Ld->getPointerInfo(), Ld->getAlignment(), |
| 4584 | Ld->getMemOperand()->getFlags()); |
| 4585 | |
  llvm_unreachable("Unknown VFP cmp argument!");
| 4587 | } |
| 4588 | |
| 4589 | static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, |
| 4590 | SDValue &RetVal1, SDValue &RetVal2) { |
| 4591 | SDLoc dl(Op); |
| 4592 | |
| 4593 | if (isFloatingPointZero(Op)) { |
| 4594 | RetVal1 = DAG.getConstant(0, dl, MVT::i32); |
| 4595 | RetVal2 = DAG.getConstant(0, dl, MVT::i32); |
| 4596 | return; |
| 4597 | } |
| 4598 | |
| 4599 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { |
| 4600 | SDValue Ptr = Ld->getBasePtr(); |
| 4601 | RetVal1 = |
| 4602 | DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), |
| 4603 | Ld->getAlignment(), Ld->getMemOperand()->getFlags()); |
| 4604 | |
| 4605 | EVT PtrType = Ptr.getValueType(); |
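    // The word at offset 4 is only guaranteed the smaller of the original
    // load's alignment and 4 bytes.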
| 4606 | unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); |
| 4607 | SDValue NewPtr = DAG.getNode(ISD::ADD, dl, |
| 4608 | PtrType, Ptr, DAG.getConstant(4, dl, PtrType)); |
| 4609 | RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr, |
| 4610 | Ld->getPointerInfo().getWithOffset(4), NewAlign, |
| 4611 | Ld->getMemOperand()->getFlags()); |
| 4612 | return; |
| 4613 | } |
| 4614 | |
  llvm_unreachable("Unknown VFP cmp argument!");
| 4616 | } |
| 4617 | |
| 4618 | /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some |
| 4619 | /// f32 and even f64 comparisons to integer ones. |
| 4620 | SDValue |
| 4621 | ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { |
| 4622 | SDValue Chain = Op.getOperand(0); |
| 4623 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); |
| 4624 | SDValue LHS = Op.getOperand(2); |
| 4625 | SDValue RHS = Op.getOperand(3); |
| 4626 | SDValue Dest = Op.getOperand(4); |
| 4627 | SDLoc dl(Op); |
| 4628 | |
| 4629 | bool LHSSeenZero = false; |
| 4630 | bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); |
| 4631 | bool RHSSeenZero = false; |
| 4632 | bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); |
| 4633 | if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { |
| 4634 | // If unsafe fp math optimization is enabled and there are no other uses of |
| 4635 | // the CMP operands, and the condition code is EQ or NE, we can optimize it |
| 4636 | // to an integer comparison. |
| 4637 | if (CC == ISD::SETOEQ) |
| 4638 | CC = ISD::SETEQ; |
| 4639 | else if (CC == ISD::SETUNE) |
| 4640 | CC = ISD::SETNE; |
| 4641 | |
| 4642 | SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32); |
| 4643 | SDValue ARMcc; |
| 4644 | if (LHS.getValueType() == MVT::f32) { |
| 4645 | LHS = DAG.getNode(ISD::AND, dl, MVT::i32, |
| 4646 | bitcastf32Toi32(LHS, DAG), Mask); |
| 4647 | RHS = DAG.getNode(ISD::AND, dl, MVT::i32, |
| 4648 | bitcastf32Toi32(RHS, DAG), Mask); |
| 4649 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 4650 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4651 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, |
| 4652 | Chain, Dest, ARMcc, CCR, Cmp); |
| 4653 | } |
| 4654 | |
| 4655 | SDValue LHS1, LHS2; |
| 4656 | SDValue RHS1, RHS2; |
| 4657 | expandf64Toi32(LHS, DAG, LHS1, LHS2); |
| 4658 | expandf64Toi32(RHS, DAG, RHS1, RHS2); |
| 4659 | LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); |
| 4660 | RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); |
| 4661 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 4662 | ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
| 4663 | SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); |
| 4664 | SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; |
| 4665 | return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops); |
| 4666 | } |
| 4667 | |
| 4668 | return SDValue(); |
| 4669 | } |
| 4670 | |
| 4671 | SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { |
| 4672 | SDValue Chain = Op.getOperand(0); |
| 4673 | SDValue Cond = Op.getOperand(1); |
| 4674 | SDValue Dest = Op.getOperand(2); |
| 4675 | SDLoc dl(Op); |
| 4676 | |
| 4677 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 4678 | // instruction. |
| 4679 | unsigned Opc = Cond.getOpcode(); |
| 4680 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 4681 | !Subtarget->isThumb1Only(); |
| 4682 | if (Cond.getResNo() == 1 && |
| 4683 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 4684 | Opc == ISD::USUBO || OptimizeMul)) { |
| 4685 | // Only lower legal XALUO ops. |
| 4686 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) |
| 4687 | return SDValue(); |
| 4688 | |
| 4689 | // The actual operation with overflow check. |
| 4690 | SDValue Value, OverflowCmp; |
| 4691 | SDValue ARMcc; |
| 4692 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); |
| 4693 | |
| 4694 | // Reverse the condition code. |
| 4695 | ARMCC::CondCodes CondCode = |
| 4696 | (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); |
| 4697 | CondCode = ARMCC::getOppositeCondition(CondCode); |
| 4698 | ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); |
| 4699 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4700 | |
| 4701 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, |
| 4702 | OverflowCmp); |
| 4703 | } |
| 4704 | |
| 4705 | return SDValue(); |
| 4706 | } |
| 4707 | |
| 4708 | SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { |
| 4709 | SDValue Chain = Op.getOperand(0); |
| 4710 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); |
| 4711 | SDValue LHS = Op.getOperand(2); |
| 4712 | SDValue RHS = Op.getOperand(3); |
| 4713 | SDValue Dest = Op.getOperand(4); |
| 4714 | SDLoc dl(Op); |
| 4715 | |
| 4716 | if (!Subtarget->hasFP64() && LHS.getValueType() == MVT::f64) { |
| 4717 | DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, |
| 4718 | dl); |
| 4719 | |
| 4720 | // If softenSetCCOperands only returned one value, we should compare it to |
| 4721 | // zero. |
| 4722 | if (!RHS.getNode()) { |
| 4723 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); |
| 4724 | CC = ISD::SETNE; |
| 4725 | } |
| 4726 | } |
| 4727 | |
| 4728 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 4729 | // instruction. |
| 4730 | unsigned Opc = LHS.getOpcode(); |
| 4731 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 4732 | !Subtarget->isThumb1Only(); |
| 4733 | if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) && |
| 4734 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 4735 | Opc == ISD::USUBO || OptimizeMul) && |
| 4736 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { |
| 4737 | // Only lower legal XALUO ops. |
| 4738 | if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0))) |
| 4739 | return SDValue(); |
| 4740 | |
| 4741 | // The actual operation with overflow check. |
| 4742 | SDValue Value, OverflowCmp; |
| 4743 | SDValue ARMcc; |
| 4744 | std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc); |
| 4745 | |
| 4746 | if ((CC == ISD::SETNE) != isOneConstant(RHS)) { |
| 4747 | // Reverse the condition code. |
| 4748 | ARMCC::CondCodes CondCode = |
| 4749 | (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); |
| 4750 | CondCode = ARMCC::getOppositeCondition(CondCode); |
| 4751 | ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); |
| 4752 | } |
| 4753 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4754 | |
| 4755 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, |
| 4756 | OverflowCmp); |
| 4757 | } |
| 4758 | |
| 4759 | if (LHS.getValueType() == MVT::i32) { |
| 4760 | SDValue ARMcc; |
| 4761 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 4762 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4763 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, |
| 4764 | Chain, Dest, ARMcc, CCR, Cmp); |
| 4765 | } |
| 4766 | |
| 4767 | if (getTargetMachine().Options.UnsafeFPMath && |
| 4768 | (CC == ISD::SETEQ || CC == ISD::SETOEQ || |
| 4769 | CC == ISD::SETNE || CC == ISD::SETUNE)) { |
| 4770 | if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) |
| 4771 | return Result; |
| 4772 | } |
| 4773 | |
| 4774 | ARMCC::CondCodes CondCode, CondCode2; |
| 4775 | bool InvalidOnQNaN; |
| 4776 | FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN); |
| 4777 | |
| 4778 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
| 4779 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); |
| 4780 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4781 | SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); |
| 4782 | SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; |
| 4783 | SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); |
| 4784 | if (CondCode2 != ARMCC::AL) { |
| 4785 | ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); |
| 4786 | SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; |
| 4787 | Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); |
| 4788 | } |
| 4789 | return Res; |
| 4790 | } |
| 4791 | |
| 4792 | SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { |
| 4793 | SDValue Chain = Op.getOperand(0); |
| 4794 | SDValue Table = Op.getOperand(1); |
| 4795 | SDValue Index = Op.getOperand(2); |
| 4796 | SDLoc dl(Op); |
| 4797 | |
| 4798 | EVT PTy = getPointerTy(DAG.getDataLayout()); |
| 4799 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); |
| 4800 | SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); |
| 4801 | Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI); |
| 4802 | Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy)); |
| 4803 | SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index); |
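// Illustrative arithmetic for the address computation above: each jump-table
// entry is a 32-bit word, so entry I lives at Table + 4*I; e.g. Index = 2
// addresses the word at Table + 8.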
if (Subtarget->isThumb2() ||
(Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
// Thumb2 and ARMv8-M use a two-level jump: the first branch jumps into the
// jump table, which then does another jump to the destination. This also
// makes it easier to translate it to TBB / TBH later (Thumb2 only).
| 4808 | // FIXME: This might not work if the function is extremely large. |
| 4809 | return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, |
| 4810 | Addr, Op.getOperand(2), JTI); |
| 4811 | } |
| 4812 | if (isPositionIndependent() || Subtarget->isROPI()) { |
| 4813 | Addr = |
| 4814 | DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, |
| 4815 | MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); |
| 4816 | Chain = Addr.getValue(1); |
| 4817 | Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr); |
| 4818 | return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); |
| 4819 | } else { |
| 4820 | Addr = |
| 4821 | DAG.getLoad(PTy, dl, Chain, Addr, |
| 4822 | MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); |
| 4823 | Chain = Addr.getValue(1); |
| 4824 | return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); |
| 4825 | } |
| 4826 | } |
| 4827 | |
| 4828 | static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { |
| 4829 | EVT VT = Op.getValueType(); |
| 4830 | SDLoc dl(Op); |
| 4831 | |
| 4832 | if (Op.getValueType().getVectorElementType() == MVT::i32) { |
| 4833 | if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) |
| 4834 | return Op; |
| 4835 | return DAG.UnrollVectorOp(Op.getNode()); |
| 4836 | } |
| 4837 | |
| 4838 | const bool HasFullFP16 = |
| 4839 | static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16(); |
| 4840 | |
| 4841 | EVT NewTy; |
| 4842 | const EVT OpTy = Op.getOperand(0).getValueType(); |
| 4843 | if (OpTy == MVT::v4f32) |
| 4844 | NewTy = MVT::v4i32; |
| 4845 | else if (OpTy == MVT::v4f16 && HasFullFP16) |
| 4846 | NewTy = MVT::v4i16; |
| 4847 | else if (OpTy == MVT::v8f16 && HasFullFP16) |
| 4848 | NewTy = MVT::v8i16; |
| 4849 | else |
| 4850 | llvm_unreachable("Invalid type for custom lowering!" ); |
| 4851 | |
| 4852 | if (VT != MVT::v4i16 && VT != MVT::v8i16) |
| 4853 | return DAG.UnrollVectorOp(Op.getNode()); |
| 4854 | |
| 4855 | Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0)); |
| 4856 | return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); |
| 4857 | } |
| 4858 | |
| 4859 | SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { |
| 4860 | EVT VT = Op.getValueType(); |
| 4861 | if (VT.isVector()) |
| 4862 | return LowerVectorFP_TO_INT(Op, DAG); |
| 4863 | if (!Subtarget->hasFP64() && Op.getOperand(0).getValueType() == MVT::f64) { |
| 4864 | RTLIB::Libcall LC; |
| 4865 | if (Op.getOpcode() == ISD::FP_TO_SINT) |
| 4866 | LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), |
| 4867 | Op.getValueType()); |
| 4868 | else |
| 4869 | LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), |
| 4870 | Op.getValueType()); |
| 4871 | return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), |
| 4872 | /*isSigned*/ false, SDLoc(Op)).first; |
| 4873 | } |
| 4874 | |
| 4875 | return Op; |
| 4876 | } |
| 4877 | |
| 4878 | static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { |
| 4879 | EVT VT = Op.getValueType(); |
| 4880 | SDLoc dl(Op); |
| 4881 | |
| 4882 | if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { |
| 4883 | if (VT.getVectorElementType() == MVT::f32) |
| 4884 | return Op; |
| 4885 | return DAG.UnrollVectorOp(Op.getNode()); |
| 4886 | } |
| 4887 | |
| 4888 | assert((Op.getOperand(0).getValueType() == MVT::v4i16 || |
| 4889 | Op.getOperand(0).getValueType() == MVT::v8i16) && |
| 4890 | "Invalid type for custom lowering!" ); |
| 4891 | |
| 4892 | const bool HasFullFP16 = |
| 4893 | static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16(); |
| 4894 | |
| 4895 | EVT DestVecType; |
| 4896 | if (VT == MVT::v4f32) |
| 4897 | DestVecType = MVT::v4i32; |
| 4898 | else if (VT == MVT::v4f16 && HasFullFP16) |
| 4899 | DestVecType = MVT::v4i16; |
| 4900 | else if (VT == MVT::v8f16 && HasFullFP16) |
| 4901 | DestVecType = MVT::v8i16; |
| 4902 | else |
| 4903 | return DAG.UnrollVectorOp(Op.getNode()); |
| 4904 | |
| 4905 | unsigned CastOpc; |
| 4906 | unsigned Opc; |
| 4907 | switch (Op.getOpcode()) { |
| 4908 | default: llvm_unreachable("Invalid opcode!" ); |
| 4909 | case ISD::SINT_TO_FP: |
| 4910 | CastOpc = ISD::SIGN_EXTEND; |
| 4911 | Opc = ISD::SINT_TO_FP; |
| 4912 | break; |
| 4913 | case ISD::UINT_TO_FP: |
| 4914 | CastOpc = ISD::ZERO_EXTEND; |
| 4915 | Opc = ISD::UINT_TO_FP; |
| 4916 | break; |
| 4917 | } |
| 4918 | |
| 4919 | Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0)); |
| 4920 | return DAG.getNode(Opc, dl, VT, Op); |
| 4921 | } |
| 4922 | |
| 4923 | SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { |
| 4924 | EVT VT = Op.getValueType(); |
| 4925 | if (VT.isVector()) |
| 4926 | return LowerVectorINT_TO_FP(Op, DAG); |
| 4927 | if (!Subtarget->hasFP64() && Op.getValueType() == MVT::f64) { |
| 4928 | RTLIB::Libcall LC; |
| 4929 | if (Op.getOpcode() == ISD::SINT_TO_FP) |
| 4930 | LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), |
| 4931 | Op.getValueType()); |
| 4932 | else |
| 4933 | LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), |
| 4934 | Op.getValueType()); |
| 4935 | return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), |
| 4936 | /*isSigned*/ false, SDLoc(Op)).first; |
| 4937 | } |
| 4938 | |
| 4939 | return Op; |
| 4940 | } |
| 4941 | |
| 4942 | SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { |
| 4943 | // Implement fcopysign with a fabs and a conditional fneg. |
| 4944 | SDValue Tmp0 = Op.getOperand(0); |
| 4945 | SDValue Tmp1 = Op.getOperand(1); |
| 4946 | SDLoc dl(Op); |
| 4947 | EVT VT = Op.getValueType(); |
| 4948 | EVT SrcVT = Tmp1.getValueType(); |
| 4949 | bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || |
| 4950 | Tmp0.getOpcode() == ARMISD::VMOVDRR; |
| 4951 | bool UseNEON = !InGPR && Subtarget->hasNEON(); |
| 4952 | |
| 4953 | if (UseNEON) { |
| 4954 | // Use VBSL to copy the sign bit. |
| 4955 | unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); |
| 4956 | SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, |
| 4957 | DAG.getTargetConstant(EncodedVal, dl, MVT::i32)); |
| 4958 | EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; |
| 4959 | if (VT == MVT::f64) |
| 4960 | Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, |
| 4961 | DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), |
| 4962 | DAG.getConstant(32, dl, MVT::i32)); |
| 4963 | else /*if (VT == MVT::f32)*/ |
| 4964 | Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); |
| 4965 | if (SrcVT == MVT::f32) { |
| 4966 | Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); |
| 4967 | if (VT == MVT::f64) |
| 4968 | Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, |
| 4969 | DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), |
| 4970 | DAG.getConstant(32, dl, MVT::i32)); |
| 4971 | } else if (VT == MVT::f32) |
| 4972 | Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, |
| 4973 | DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), |
| 4974 | DAG.getConstant(32, dl, MVT::i32)); |
| 4975 | Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); |
| 4976 | Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); |
| 4977 | |
| 4978 | SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), |
| 4979 | dl, MVT::i32); |
| 4980 | AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); |
| 4981 | SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, |
| 4982 | DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); |
| 4983 | |
| 4984 | SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, |
| 4985 | DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), |
| 4986 | DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); |
| 4987 | if (VT == MVT::f32) { |
| 4988 | Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); |
| 4989 | Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, |
| 4990 | DAG.getConstant(0, dl, MVT::i32)); |
| 4991 | } else { |
| 4992 | Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); |
| 4993 | } |
| 4994 | |
| 4995 | return Res; |
| 4996 | } |
| 4997 | |
| 4998 | // Bitcast operand 1 to i32. |
| 4999 | if (SrcVT == MVT::f64) |
| 5000 | Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), |
| 5001 | Tmp1).getValue(1); |
| 5002 | Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); |
| 5003 | |
| 5004 | // Or in the signbit with integer operations. |
| 5005 | SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32); |
| 5006 | SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32); |
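// Worked example of the mask logic below: copysign(1.0f, -2.0f).
// bits(1.0f) = 0x3f800000 and bits(-2.0f) = 0xc0000000, so
// (0xc0000000 & 0x80000000) | (0x3f800000 & 0x7fffffff) = 0xbf800000 = -1.0f.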
| 5007 | Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); |
| 5008 | if (VT == MVT::f32) { |
| 5009 | Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, |
| 5010 | DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); |
| 5011 | return DAG.getNode(ISD::BITCAST, dl, MVT::f32, |
| 5012 | DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); |
| 5013 | } |
| 5014 | |
| 5015 | // f64: Or the high part with signbit and then combine two parts. |
| 5016 | Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), |
| 5017 | Tmp0); |
| 5018 | SDValue Lo = Tmp0.getValue(0); |
| 5019 | SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); |
| 5020 | Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); |
| 5021 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); |
| 5022 | } |
| 5023 | |
SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
| 5025 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5026 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 5027 | MFI.setReturnAddressIsTaken(true); |
| 5028 | |
| 5029 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) |
| 5030 | return SDValue(); |
| 5031 | |
| 5032 | EVT VT = Op.getValueType(); |
| 5033 | SDLoc dl(Op); |
| 5034 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 5035 | if (Depth) { |
| 5036 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); |
| 5037 | SDValue Offset = DAG.getConstant(4, dl, MVT::i32); |
| 5038 | return DAG.getLoad(VT, dl, DAG.getEntryNode(), |
| 5039 | DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), |
| 5040 | MachinePointerInfo()); |
| 5041 | } |
| 5042 | |
| 5043 | // Return LR, which contains the return address. Mark it an implicit live-in. |
| 5044 | unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); |
| 5045 | return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); |
| 5046 | } |
| 5047 | |
| 5048 | SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { |
| 5049 | const ARMBaseRegisterInfo &ARI = |
| 5050 | *static_cast<const ARMBaseRegisterInfo*>(RegInfo); |
| 5051 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5052 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 5053 | MFI.setFrameAddressIsTaken(true); |
| 5054 | |
| 5055 | EVT VT = Op.getValueType(); |
| 5056 | SDLoc dl(Op); // FIXME probably not meaningful |
| 5057 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 5058 | unsigned FrameReg = ARI.getFrameRegister(MF); |
| 5059 | SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); |
| 5060 | while (Depth--) |
| 5061 | FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, |
| 5062 | MachinePointerInfo()); |
| 5063 | return FrameAddr; |
| 5064 | } |
| 5065 | |
| 5066 | // FIXME? Maybe this could be a TableGen attribute on some registers and |
| 5067 | // this table could be generated automatically from RegInfo. |
| 5068 | unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT, |
| 5069 | SelectionDAG &DAG) const { |
| 5070 | unsigned Reg = StringSwitch<unsigned>(RegName) |
| 5071 | .Case("sp" , ARM::SP) |
| 5072 | .Default(0); |
| 5073 | if (Reg) |
| 5074 | return Reg; |
| 5075 | report_fatal_error(Twine("Invalid register name \"" |
+ StringRef(RegName) + "\"."));
| 5077 | } |
| 5078 | |
// The result is a 64-bit value, so split it into two 32-bit values and
// return them as a pair of values.
| 5081 | static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 5082 | SelectionDAG &DAG) { |
| 5083 | SDLoc DL(N); |
| 5084 | |
// This function is only supposed to be called for an i64 type destination.
assert(N->getValueType(0) == MVT::i64
&& "ExpandREAD_REGISTER called for non-i64 type result.");
| 5088 | |
| 5089 | SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL, |
| 5090 | DAG.getVTList(MVT::i32, MVT::i32, MVT::Other), |
| 5091 | N->getOperand(0), |
| 5092 | N->getOperand(1)); |
| 5093 | |
| 5094 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0), |
| 5095 | Read.getValue(1))); |
| 5096 | Results.push_back(Read.getOperand(0)); |
| 5097 | } |
| 5098 | |
| 5099 | /// \p BC is a bitcast that is about to be turned into a VMOVDRR. |
| 5100 | /// When \p DstVT, the destination type of \p BC, is on the vector |
| 5101 | /// register bank and the source of bitcast, \p Op, operates on the same bank, |
| 5102 | /// it might be possible to combine them, such that everything stays on the |
| 5103 | /// vector register bank. |
/// \return The node that would replace \p BC, if the combine
/// is possible.
| 5106 | static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, |
| 5107 | SelectionDAG &DAG) { |
| 5108 | SDValue Op = BC->getOperand(0); |
| 5109 | EVT DstVT = BC->getValueType(0); |
| 5110 | |
| 5111 | // The only vector instruction that can produce a scalar (remember, |
| 5112 | // since the bitcast was about to be turned into VMOVDRR, the source |
| 5113 | // type is i64) from a vector is EXTRACT_VECTOR_ELT. |
| 5114 | // Moreover, we can do this combine only if there is one use. |
// Finally, if the destination type is not a vector, there is not
// much point in forcing everything on the vector bank.
| 5117 | if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 5118 | !Op.hasOneUse()) |
| 5119 | return SDValue(); |
| 5120 | |
| 5121 | // If the index is not constant, we will introduce an additional |
| 5122 | // multiply that will stick. |
| 5123 | // Give up in that case. |
| 5124 | ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1)); |
| 5125 | if (!Index) |
| 5126 | return SDValue(); |
| 5127 | unsigned DstNumElt = DstVT.getVectorNumElements(); |
| 5128 | |
| 5129 | // Compute the new index. |
| 5130 | const APInt &APIntIndex = Index->getAPIntValue(); |
| 5131 | APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); |
| 5132 | NewIndex *= APIntIndex; |
| 5133 | // Check if the new constant index fits into i32. |
| 5134 | if (NewIndex.getBitWidth() > 32) |
| 5135 | return SDValue(); |
| 5136 | |
| 5137 | // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> |
| 5138 | // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) |
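// For example, with DstVT = v2f32 (M = 2) and a v2i64 source, extracting
// element 1 becomes: v2f32 extractsubvector (v4f32 bitcast v2i64 src), 2.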
| 5139 | SDLoc dl(Op); |
SDValue ExtractSrc = Op.getOperand(0);
| 5141 | EVT VecVT = EVT::getVectorVT( |
| 5142 | *DAG.getContext(), DstVT.getScalarType(), |
| 5143 | ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); |
| 5144 | SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc); |
| 5145 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast, |
| 5146 | DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32)); |
| 5147 | } |
| 5148 | |
| 5149 | /// ExpandBITCAST - If the target supports VFP, this function is called to |
| 5150 | /// expand a bit convert where either the source or destination type is i64 to |
| 5151 | /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 |
| 5152 | /// operand type is illegal (e.g., v2f32 for a target that doesn't support |
| 5153 | /// vectors), since the legalizer won't know what to do with that. |
| 5154 | static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG, |
| 5155 | const ARMSubtarget *Subtarget) { |
| 5156 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5157 | SDLoc dl(N); |
| 5158 | SDValue Op = N->getOperand(0); |
| 5159 | |
| 5160 | // This function is only supposed to be called for i64 types, either as the |
| 5161 | // source or destination of the bit convert. |
| 5162 | EVT SrcVT = Op.getValueType(); |
| 5163 | EVT DstVT = N->getValueType(0); |
| 5164 | const bool HasFullFP16 = Subtarget->hasFullFP16(); |
| 5165 | |
| 5166 | if (SrcVT == MVT::f32 && DstVT == MVT::i32) { |
// FullFP16: half values are passed in S-registers, and we don't
// need any of the bitcasts and moves:
| 5169 | // |
| 5170 | // t2: f32,ch = CopyFromReg t0, Register:f32 %0 |
| 5171 | // t5: i32 = bitcast t2 |
| 5172 | // t18: f16 = ARMISD::VMOVhr t5 |
| 5173 | if (Op.getOpcode() != ISD::CopyFromReg || |
| 5174 | Op.getValueType() != MVT::f32) |
| 5175 | return SDValue(); |
| 5176 | |
| 5177 | auto Move = N->use_begin(); |
| 5178 | if (Move->getOpcode() != ARMISD::VMOVhr) |
| 5179 | return SDValue(); |
| 5180 | |
| 5181 | SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; |
| 5182 | SDValue Copy = DAG.getNode(ISD::CopyFromReg, SDLoc(Op), MVT::f16, Ops); |
| 5183 | DAG.ReplaceAllUsesWith(*Move, &Copy); |
| 5184 | return Copy; |
| 5185 | } |
| 5186 | |
| 5187 | if (SrcVT == MVT::i16 && DstVT == MVT::f16) { |
| 5188 | if (!HasFullFP16) |
| 5189 | return SDValue(); |
| 5190 | // SoftFP: read half-precision arguments: |
| 5191 | // |
| 5192 | // t2: i32,ch = ... |
| 5193 | // t7: i16 = truncate t2 <~~~~ Op |
| 5194 | // t8: f16 = bitcast t7 <~~~~ N |
| 5195 | // |
| 5196 | if (Op.getOperand(0).getValueType() == MVT::i32) |
| 5197 | return DAG.getNode(ARMISD::VMOVhr, SDLoc(Op), |
| 5198 | MVT::f16, Op.getOperand(0)); |
| 5199 | |
| 5200 | return SDValue(); |
| 5201 | } |
| 5202 | |
| 5203 | // Half-precision return values |
| 5204 | if (SrcVT == MVT::f16 && DstVT == MVT::i16) { |
| 5205 | if (!HasFullFP16) |
| 5206 | return SDValue(); |
| 5207 | // |
| 5208 | // t11: f16 = fadd t8, t10 |
| 5209 | // t12: i16 = bitcast t11 <~~~ SDNode N |
| 5210 | // t13: i32 = zero_extend t12 |
| 5211 | // t16: ch,glue = CopyToReg t0, Register:i32 %r0, t13 |
| 5212 | // t17: ch = ARMISD::RET_FLAG t16, Register:i32 %r0, t16:1 |
| 5213 | // |
| 5214 | // transform this into: |
| 5215 | // |
| 5216 | // t20: i32 = ARMISD::VMOVrh t11 |
| 5217 | // t16: ch,glue = CopyToReg t0, Register:i32 %r0, t20 |
| 5218 | // |
| 5219 | auto ZeroExtend = N->use_begin(); |
| 5220 | if (N->use_size() != 1 || ZeroExtend->getOpcode() != ISD::ZERO_EXTEND || |
| 5221 | ZeroExtend->getValueType(0) != MVT::i32) |
| 5222 | return SDValue(); |
| 5223 | |
| 5224 | auto Copy = ZeroExtend->use_begin(); |
| 5225 | if (Copy->getOpcode() == ISD::CopyToReg && |
| 5226 | Copy->use_begin()->getOpcode() == ARMISD::RET_FLAG) { |
| 5227 | SDValue Cvt = DAG.getNode(ARMISD::VMOVrh, SDLoc(Op), MVT::i32, Op); |
| 5228 | DAG.ReplaceAllUsesWith(*ZeroExtend, &Cvt); |
| 5229 | return Cvt; |
| 5230 | } |
| 5231 | return SDValue(); |
| 5232 | } |
| 5233 | |
| 5234 | if (!(SrcVT == MVT::i64 || DstVT == MVT::i64)) |
| 5235 | return SDValue(); |
| 5236 | |
| 5237 | // Turn i64->f64 into VMOVDRR. |
| 5238 | if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { |
| 5239 | // Do not force values to GPRs (this is what VMOVDRR does for the inputs) |
| 5240 | // if we can combine the bitcast with its source. |
| 5241 | if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG)) |
| 5242 | return Val; |
| 5243 | |
| 5244 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, |
| 5245 | DAG.getConstant(0, dl, MVT::i32)); |
| 5246 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, |
| 5247 | DAG.getConstant(1, dl, MVT::i32)); |
| 5248 | return DAG.getNode(ISD::BITCAST, dl, DstVT, |
| 5249 | DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); |
| 5250 | } |
| 5251 | |
| 5252 | // Turn f64->i64 into VMOVRRD. |
| 5253 | if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { |
| 5254 | SDValue Cvt; |
| 5255 | if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && |
| 5256 | SrcVT.getVectorNumElements() > 1) |
| 5257 | Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 5258 | DAG.getVTList(MVT::i32, MVT::i32), |
| 5259 | DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op)); |
| 5260 | else |
| 5261 | Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 5262 | DAG.getVTList(MVT::i32, MVT::i32), Op); |
| 5263 | // Merge the pieces into a single i64 value. |
| 5264 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); |
| 5265 | } |
| 5266 | |
| 5267 | return SDValue(); |
| 5268 | } |
| 5269 | |
| 5270 | /// getZeroVector - Returns a vector of specified type with all zero elements. |
| 5271 | /// Zero vectors are used to represent vector negation and in those cases |
| 5272 | /// will be implemented with the NEON VNEG instruction. However, VNEG does |
| 5273 | /// not support i64 elements, so sometimes the zero vectors will need to be |
| 5274 | /// explicitly constructed. Regardless, use a canonical VMOV to create the |
| 5275 | /// zero vector. |
| 5276 | static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { |
assert(VT.isVector() && "Expected a vector type");
// The canonical modified immediate encoding of a zero vector is... 0!
| 5279 | SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32); |
| 5280 | EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 5281 | SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); |
| 5282 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); |
| 5283 | } |
| 5284 | |
/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
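/// For shift amounts below the register width the low result is composed as
///   Lo = (ShOpLo lshr ShAmt) | (ShOpHi shl (VTBits - ShAmt));
/// for larger amounts the high word is shifted directly into the low word,
/// and Hi is filled with the sign (SRA) or zero (SRL).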
| 5287 | SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, |
| 5288 | SelectionDAG &DAG) const { |
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
| 5290 | EVT VT = Op.getValueType(); |
| 5291 | unsigned VTBits = VT.getSizeInBits(); |
| 5292 | SDLoc dl(Op); |
| 5293 | SDValue ShOpLo = Op.getOperand(0); |
| 5294 | SDValue ShOpHi = Op.getOperand(1); |
| 5295 | SDValue ShAmt = Op.getOperand(2); |
| 5296 | SDValue ARMcc; |
| 5297 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5298 | unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; |
| 5299 | |
| 5300 | assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); |
| 5301 | |
| 5302 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 5303 | DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); |
| 5304 | SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); |
SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
| 5306 | DAG.getConstant(VTBits, dl, MVT::i32)); |
| 5307 | SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); |
| 5308 | SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); |
| 5309 | SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); |
| 5310 | SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
| 5311 | ISD::SETGE, ARMcc, DAG, dl); |
| 5312 | SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift, |
| 5313 | ARMcc, CCR, CmpLo); |
| 5314 | |
| 5315 | SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); |
| 5316 | SDValue HiBigShift = Opc == ISD::SRA |
| 5317 | ? DAG.getNode(Opc, dl, VT, ShOpHi, |
| 5318 | DAG.getConstant(VTBits - 1, dl, VT)) |
| 5319 | : DAG.getConstant(0, dl, VT); |
| 5320 | SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
| 5321 | ISD::SETGE, ARMcc, DAG, dl); |
| 5322 | SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, |
| 5323 | ARMcc, CCR, CmpHi); |
| 5324 | |
| 5325 | SDValue Ops[2] = { Lo, Hi }; |
| 5326 | return DAG.getMergeValues(Ops, dl); |
| 5327 | } |
| 5328 | |
/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
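/// For shift amounts below the register width the high result is composed as
///   Hi = (ShOpHi shl ShAmt) | (ShOpLo lshr (VTBits - ShAmt));
/// for larger amounts Hi = ShOpLo shl (ShAmt - VTBits) and Lo = 0.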
| 5331 | SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, |
| 5332 | SelectionDAG &DAG) const { |
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
| 5334 | EVT VT = Op.getValueType(); |
| 5335 | unsigned VTBits = VT.getSizeInBits(); |
| 5336 | SDLoc dl(Op); |
| 5337 | SDValue ShOpLo = Op.getOperand(0); |
| 5338 | SDValue ShOpHi = Op.getOperand(1); |
| 5339 | SDValue ShAmt = Op.getOperand(2); |
| 5340 | SDValue ARMcc; |
| 5341 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5342 | |
| 5343 | assert(Op.getOpcode() == ISD::SHL_PARTS); |
| 5344 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 5345 | DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); |
| 5346 | SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); |
| 5347 | SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); |
| 5348 | SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); |
| 5349 | |
SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
| 5351 | DAG.getConstant(VTBits, dl, MVT::i32)); |
| 5352 | SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); |
| 5353 | SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
| 5354 | ISD::SETGE, ARMcc, DAG, dl); |
| 5355 | SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, |
| 5356 | ARMcc, CCR, CmpHi); |
| 5357 | |
| 5358 | SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
| 5359 | ISD::SETGE, ARMcc, DAG, dl); |
| 5360 | SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); |
| 5361 | SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, |
| 5362 | DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo); |
| 5363 | |
| 5364 | SDValue Ops[2] = { Lo, Hi }; |
| 5365 | return DAG.getMergeValues(Ops, dl); |
| 5366 | } |
| 5367 | |
| 5368 | SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, |
| 5369 | SelectionDAG &DAG) const { |
| 5370 | // The rounding mode is in bits 23:22 of the FPSCR. |
| 5371 | // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 |
// The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3)
| 5373 | // so that the shift + and get folded into a bitfield extract. |
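// For example, with the rounding-mode field equal to 3 (round toward zero),
// adding 1 << 22 carries out of the two-bit field, so the extracted value is
// (3 + 1) & 3 == 0, matching the 3->0 entry of the mapping above.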
| 5374 | SDLoc dl(Op); |
| 5375 | SDValue Ops[] = { DAG.getEntryNode(), |
| 5376 | DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32) }; |
| 5377 | |
| 5378 | SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, MVT::i32, Ops); |
| 5379 | SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, |
| 5380 | DAG.getConstant(1U << 22, dl, MVT::i32)); |
| 5381 | SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, |
| 5382 | DAG.getConstant(22, dl, MVT::i32)); |
| 5383 | return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, |
| 5384 | DAG.getConstant(3, dl, MVT::i32)); |
| 5385 | } |
| 5386 | |
| 5387 | static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, |
| 5388 | const ARMSubtarget *ST) { |
| 5389 | SDLoc dl(N); |
| 5390 | EVT VT = N->getValueType(0); |
| 5391 | if (VT.isVector()) { |
| 5392 | assert(ST->hasNEON()); |
| 5393 | |
| 5394 | // Compute the least significant set bit: LSB = X & -X |
| 5395 | SDValue X = N->getOperand(0); |
| 5396 | SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X); |
| 5397 | SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX); |
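// For example, X = 0b01100 gives -X = ...10100 and LSB = X & -X = 0b00100;
// LSB - 1 = 0b00011 then has exactly cttz(X) = 2 bits set.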
| 5398 | |
| 5399 | EVT ElemTy = VT.getVectorElementType(); |
| 5400 | |
| 5401 | if (ElemTy == MVT::i8) { |
| 5402 | // Compute with: cttz(x) = ctpop(lsb - 1) |
| 5403 | SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
| 5404 | DAG.getTargetConstant(1, dl, ElemTy)); |
| 5405 | SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); |
| 5406 | return DAG.getNode(ISD::CTPOP, dl, VT, Bits); |
| 5407 | } |
| 5408 | |
| 5409 | if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && |
| 5410 | (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { |
| 5411 | // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 |
| 5412 | unsigned NumBits = ElemTy.getSizeInBits(); |
| 5413 | SDValue WidthMinus1 = |
| 5414 | DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
| 5415 | DAG.getTargetConstant(NumBits - 1, dl, ElemTy)); |
| 5416 | SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB); |
| 5417 | return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ); |
| 5418 | } |
| 5419 | |
| 5420 | // Compute with: cttz(x) = ctpop(lsb - 1) |
| 5421 | |
| 5422 | // Compute LSB - 1. |
| 5423 | SDValue Bits; |
| 5424 | if (ElemTy == MVT::i64) { |
| 5425 | // Load constant 0xffff'ffff'ffff'ffff to register. |
| 5426 | SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
| 5427 | DAG.getTargetConstant(0x1eff, dl, MVT::i32)); |
| 5428 | Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF); |
| 5429 | } else { |
| 5430 | SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
| 5431 | DAG.getTargetConstant(1, dl, ElemTy)); |
| 5432 | Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); |
| 5433 | } |
| 5434 | return DAG.getNode(ISD::CTPOP, dl, VT, Bits); |
| 5435 | } |
| 5436 | |
| 5437 | if (!ST->hasV6T2Ops()) |
| 5438 | return SDValue(); |
| 5439 | |
| 5440 | SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0)); |
| 5441 | return DAG.getNode(ISD::CTLZ, dl, VT, rbit); |
| 5442 | } |
| 5443 | |
| 5444 | static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, |
| 5445 | const ARMSubtarget *ST) { |
| 5446 | EVT VT = N->getValueType(0); |
| 5447 | SDLoc DL(N); |
| 5448 | |
| 5449 | assert(ST->hasNEON() && "Custom ctpop lowering requires NEON." ); |
| 5450 | assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || |
| 5451 | VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) && |
| 5452 | "Unexpected type for custom ctpop lowering" ); |
| 5453 | |
| 5454 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5455 | EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; |
| 5456 | SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0)); |
| 5457 | Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res); |
| 5458 | |
| 5459 | // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds. |
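// For example, for VT = v4i32 this computes a v16i8 CTPOP and then applies
// arm_neon_vpaddlu twice, v16i8 -> v8i16 -> v4i32, accumulating the byte
// counts of each 32-bit element.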
| 5460 | unsigned EltSize = 8; |
| 5461 | unsigned NumElts = VT.is64BitVector() ? 8 : 16; |
| 5462 | while (EltSize != VT.getScalarSizeInBits()) { |
| 5463 | SmallVector<SDValue, 8> Ops; |
| 5464 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL, |
| 5465 | TLI.getPointerTy(DAG.getDataLayout()))); |
| 5466 | Ops.push_back(Res); |
| 5467 | |
| 5468 | EltSize *= 2; |
| 5469 | NumElts /= 2; |
| 5470 | MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts); |
| 5471 | Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops); |
| 5472 | } |
| 5473 | |
| 5474 | return Res; |
| 5475 | } |
| 5476 | |
| 5477 | static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, |
| 5478 | const ARMSubtarget *ST) { |
| 5479 | EVT VT = N->getValueType(0); |
| 5480 | SDLoc dl(N); |
| 5481 | |
| 5482 | if (!VT.isVector()) |
| 5483 | return SDValue(); |
| 5484 | |
| 5485 | // Lower vector shifts on NEON to use VSHL. |
| 5486 | assert(ST->hasNEON() && "unexpected vector shift" ); |
| 5487 | |
| 5488 | // Left shifts translate directly to the vshiftu intrinsic. |
| 5489 | if (N->getOpcode() == ISD::SHL) |
| 5490 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, |
| 5491 | DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl, |
| 5492 | MVT::i32), |
| 5493 | N->getOperand(0), N->getOperand(1)); |
| 5494 | |
| 5495 | assert((N->getOpcode() == ISD::SRA || |
N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
| 5497 | |
| 5498 | // NEON uses the same intrinsics for both left and right shifts. For |
| 5499 | // right shifts, the shift amounts are negative, so negate the vector of |
| 5500 | // shift amounts. |
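// For example, an SRL by <2, 3> becomes a vshiftu by <-2, -3>.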
| 5501 | EVT ShiftVT = N->getOperand(1).getValueType(); |
| 5502 | SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, |
| 5503 | getZeroVector(ShiftVT, DAG, dl), |
| 5504 | N->getOperand(1)); |
| 5505 | Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? |
| 5506 | Intrinsic::arm_neon_vshifts : |
| 5507 | Intrinsic::arm_neon_vshiftu); |
| 5508 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, |
| 5509 | DAG.getConstant(vshiftInt, dl, MVT::i32), |
| 5510 | N->getOperand(0), NegatedCount); |
| 5511 | } |
| 5512 | |
| 5513 | static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, |
| 5514 | const ARMSubtarget *ST) { |
| 5515 | EVT VT = N->getValueType(0); |
| 5516 | SDLoc dl(N); |
| 5517 | |
| 5518 | // We can get here for a node like i32 = ISD::SHL i32, i64 |
| 5519 | if (VT != MVT::i64) |
| 5520 | return SDValue(); |
| 5521 | |
| 5522 | assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && |
| 5523 | "Unknown shift to lower!" ); |
| 5524 | |
// We only lower SRA, SRL of 1 here; all others use generic lowering.
| 5526 | if (!isOneConstant(N->getOperand(1))) |
| 5527 | return SDValue(); |
| 5528 | |
| 5529 | // If we are in thumb mode, we don't have RRX. |
| 5530 | if (ST->isThumb1Only()) return SDValue(); |
| 5531 | |
| 5532 | // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. |
| 5533 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), |
| 5534 | DAG.getConstant(0, dl, MVT::i32)); |
| 5535 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), |
| 5536 | DAG.getConstant(1, dl, MVT::i32)); |
| 5537 | |
// First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
// captures the shifted-out bit in the carry flag.
| 5540 | unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; |
| 5541 | Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi); |
| 5542 | |
| 5543 | // The low part is an ARMISD::RRX operand, which shifts the carry in. |
| 5544 | Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); |
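// Worked example: (i64 0x0000000100000000) srl 1. SRL_FLAG turns Hi = 1 into
// 0 and leaves the shifted-out bit in the carry flag; RRX then rotates that
// carry into the top of Lo, giving Lo = 0x80000000, i.e. 0x0000000080000000.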
| 5545 | |
| 5546 | // Merge the pieces into a single i64 value. |
| 5547 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); |
| 5548 | } |
| 5549 | |
| 5550 | static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { |
| 5551 | SDValue TmpOp0, TmpOp1; |
| 5552 | bool Invert = false; |
| 5553 | bool Swap = false; |
| 5554 | unsigned Opc = 0; |
| 5555 | |
| 5556 | SDValue Op0 = Op.getOperand(0); |
| 5557 | SDValue Op1 = Op.getOperand(1); |
| 5558 | SDValue CC = Op.getOperand(2); |
| 5559 | EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); |
| 5560 | EVT VT = Op.getValueType(); |
| 5561 | ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); |
| 5562 | SDLoc dl(Op); |
| 5563 | |
| 5564 | if (Op0.getValueType().getVectorElementType() == MVT::i64 && |
| 5565 | (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { |
| 5566 | // Special-case integer 64-bit equality comparisons. They aren't legal, |
| 5567 | // but they can be lowered with a few vector instructions. |
| 5568 | unsigned CmpElements = CmpVT.getVectorNumElements() * 2; |
| 5569 | EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements); |
| 5570 | SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0); |
| 5571 | SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1); |
| 5572 | SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1, |
| 5573 | DAG.getCondCode(ISD::SETEQ)); |
| 5574 | SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp); |
| 5575 | SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed); |
| 5576 | Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged); |
| 5577 | if (SetCCOpcode == ISD::SETNE) |
| 5578 | Merged = DAG.getNOT(dl, Merged, CmpVT); |
| 5579 | Merged = DAG.getSExtOrTrunc(Merged, dl, VT); |
| 5580 | return Merged; |
| 5581 | } |
| 5582 | |
| 5583 | if (CmpVT.getVectorElementType() == MVT::i64) |
| 5584 | // 64-bit comparisons are not legal in general. |
| 5585 | return SDValue(); |
| 5586 | |
| 5587 | if (Op1.getValueType().isFloatingPoint()) { |
| 5588 | switch (SetCCOpcode) { |
| 5589 | default: llvm_unreachable("Illegal FP comparison" ); |
| 5590 | case ISD::SETUNE: |
| 5591 | case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH; |
| 5592 | case ISD::SETOEQ: |
| 5593 | case ISD::SETEQ: Opc = ARMISD::VCEQ; break; |
| 5594 | case ISD::SETOLT: |
| 5595 | case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; |
| 5596 | case ISD::SETOGT: |
| 5597 | case ISD::SETGT: Opc = ARMISD::VCGT; break; |
| 5598 | case ISD::SETOLE: |
| 5599 | case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; |
| 5600 | case ISD::SETOGE: |
| 5601 | case ISD::SETGE: Opc = ARMISD::VCGE; break; |
| 5602 | case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH; |
| 5603 | case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; |
| 5604 | case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH; |
| 5605 | case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; |
| 5606 | case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH; |
| 5607 | case ISD::SETONE: |
| 5608 | // Expand this to (OLT | OGT). |
| 5609 | TmpOp0 = Op0; |
| 5610 | TmpOp1 = Op1; |
| 5611 | Opc = ISD::OR; |
| 5612 | Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); |
| 5613 | Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1); |
| 5614 | break; |
| 5615 | case ISD::SETUO: |
| 5616 | Invert = true; |
| 5617 | LLVM_FALLTHROUGH; |
| 5618 | case ISD::SETO: |
| 5619 | // Expand this to (OLT | OGE). |
| 5620 | TmpOp0 = Op0; |
| 5621 | TmpOp1 = Op1; |
| 5622 | Opc = ISD::OR; |
| 5623 | Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); |
| 5624 | Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1); |
| 5625 | break; |
| 5626 | } |
| 5627 | } else { |
| 5628 | // Integer comparisons. |
| 5629 | switch (SetCCOpcode) { |
| 5630 | default: llvm_unreachable("Illegal integer comparison" ); |
| 5631 | case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH; |
| 5632 | case ISD::SETEQ: Opc = ARMISD::VCEQ; break; |
| 5633 | case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; |
| 5634 | case ISD::SETGT: Opc = ARMISD::VCGT; break; |
| 5635 | case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; |
| 5636 | case ISD::SETGE: Opc = ARMISD::VCGE; break; |
| 5637 | case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH; |
| 5638 | case ISD::SETUGT: Opc = ARMISD::VCGTU; break; |
| 5639 | case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH; |
| 5640 | case ISD::SETUGE: Opc = ARMISD::VCGEU; break; |
| 5641 | } |
| 5642 | |
| 5643 | // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). |
| 5644 | if (Opc == ARMISD::VCEQ) { |
| 5645 | SDValue AndOp; |
| 5646 | if (ISD::isBuildVectorAllZeros(Op1.getNode())) |
| 5647 | AndOp = Op0; |
| 5648 | else if (ISD::isBuildVectorAllZeros(Op0.getNode())) |
| 5649 | AndOp = Op1; |
| 5650 | |
| 5651 | // Ignore bitconvert. |
| 5652 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) |
| 5653 | AndOp = AndOp.getOperand(0); |
| 5654 | |
| 5655 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { |
| 5656 | Opc = ARMISD::VTST; |
| 5657 | Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0)); |
| 5658 | Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1)); |
| 5659 | Invert = !Invert; |
| 5660 | } |
| 5661 | } |
| 5662 | } |
| 5663 | |
| 5664 | if (Swap) |
| 5665 | std::swap(Op0, Op1); |
| 5666 | |
| 5667 | // If one of the operands is a constant vector zero, attempt to fold the |
| 5668 | // comparison to a specialized compare-against-zero form. |
| 5669 | SDValue SingleOp; |
| 5670 | if (ISD::isBuildVectorAllZeros(Op1.getNode())) |
| 5671 | SingleOp = Op0; |
| 5672 | else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { |
| 5673 | if (Opc == ARMISD::VCGE) |
| 5674 | Opc = ARMISD::VCLEZ; |
| 5675 | else if (Opc == ARMISD::VCGT) |
| 5676 | Opc = ARMISD::VCLTZ; |
| 5677 | SingleOp = Op1; |
| 5678 | } |
| 5679 | |
| 5680 | SDValue Result; |
| 5681 | if (SingleOp.getNode()) { |
| 5682 | switch (Opc) { |
| 5683 | case ARMISD::VCEQ: |
| 5684 | Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break; |
| 5685 | case ARMISD::VCGE: |
| 5686 | Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break; |
| 5687 | case ARMISD::VCLEZ: |
| 5688 | Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break; |
| 5689 | case ARMISD::VCGT: |
| 5690 | Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break; |
| 5691 | case ARMISD::VCLTZ: |
| 5692 | Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break; |
| 5693 | default: |
| 5694 | Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); |
| 5695 | } |
| 5696 | } else { |
| 5697 | Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); |
| 5698 | } |
| 5699 | |
| 5700 | Result = DAG.getSExtOrTrunc(Result, dl, VT); |
| 5701 | |
| 5702 | if (Invert) |
| 5703 | Result = DAG.getNOT(dl, Result, VT); |
| 5704 | |
| 5705 | return Result; |
| 5706 | } |
| 5707 | |
| 5708 | static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { |
| 5709 | SDValue LHS = Op.getOperand(0); |
| 5710 | SDValue RHS = Op.getOperand(1); |
| 5711 | SDValue Carry = Op.getOperand(2); |
| 5712 | SDValue Cond = Op.getOperand(3); |
| 5713 | SDLoc DL(Op); |
| 5714 | |
assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
| 5716 | |
| 5717 | // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we |
| 5718 | // have to invert the carry first. |
| 5719 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, |
| 5720 | DAG.getConstant(1, DL, MVT::i32), Carry); |
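// For example, a borrow of 0 (no borrow occurred) becomes an ARM carry of 1,
// and a borrow of 1 becomes a carry of 0.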
| 5721 | // This converts the boolean value carry into the carry flag. |
| 5722 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); |
| 5723 | |
| 5724 | SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); |
| 5725 | SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry); |
| 5726 | |
| 5727 | SDValue FVal = DAG.getConstant(0, DL, MVT::i32); |
| 5728 | SDValue TVal = DAG.getConstant(1, DL, MVT::i32); |
| 5729 | SDValue ARMcc = DAG.getConstant( |
| 5730 | IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32); |
| 5731 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5732 | SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR, |
| 5733 | Cmp.getValue(1), SDValue()); |
| 5734 | return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc, |
| 5735 | CCR, Chain.getValue(1)); |
| 5736 | } |
| 5737 | |
| 5738 | /// isNEONModifiedImm - Check if the specified splat value corresponds to a |
| 5739 | /// valid vector constant for a NEON instruction with a "modified immediate" |
| 5740 | /// operand (e.g., VMOV). If so, return the encoded value. |
| 5741 | static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, |
| 5742 | unsigned SplatBitSize, SelectionDAG &DAG, |
| 5743 | const SDLoc &dl, EVT &VT, bool is128Bits, |
| 5744 | NEONModImmType type) { |
| 5745 | unsigned OpCmode, Imm; |
| 5746 | |
| 5747 | // SplatBitSize is set to the smallest size that splats the vector, so a |
| 5748 | // zero vector will always have SplatBitSize == 8. However, NEON modified |
// immediate instructions other than VMOV do not support the 8-bit encoding
| 5750 | // of a zero vector, and the default encoding of zero is supposed to be the |
| 5751 | // 32-bit version. |
| 5752 | if (SplatBits == 0) |
| 5753 | SplatBitSize = 32; |
| 5754 | |
| 5755 | switch (SplatBitSize) { |
| 5756 | case 8: |
| 5757 | if (type != VMOVModImm) |
| 5758 | return SDValue(); |
| 5759 | // Any 1-byte value is OK. Op=0, Cmode=1110. |
| 5760 | assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big" ); |
| 5761 | OpCmode = 0xe; |
| 5762 | Imm = SplatBits; |
| 5763 | VT = is128Bits ? MVT::v16i8 : MVT::v8i8; |
| 5764 | break; |
| 5765 | |
| 5766 | case 16: |
| 5767 | // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. |
| 5768 | VT = is128Bits ? MVT::v8i16 : MVT::v4i16; |
| 5769 | if ((SplatBits & ~0xff) == 0) { |
| 5770 | // Value = 0x00nn: Op=x, Cmode=100x. |
| 5771 | OpCmode = 0x8; |
| 5772 | Imm = SplatBits; |
| 5773 | break; |
| 5774 | } |
| 5775 | if ((SplatBits & ~0xff00) == 0) { |
| 5776 | // Value = 0xnn00: Op=x, Cmode=101x. |
| 5777 | OpCmode = 0xa; |
| 5778 | Imm = SplatBits >> 8; |
| 5779 | break; |
| 5780 | } |
| 5781 | return SDValue(); |
| 5782 | |
| 5783 | case 32: |
| 5784 | // NEON's 32-bit VMOV supports splat values where: |
| 5785 | // * only one byte is nonzero, or |
| 5786 | // * the least significant byte is 0xff and the second byte is nonzero, or |
| 5787 | // * the least significant 2 bytes are 0xff and the third is nonzero. |
| 5788 | VT = is128Bits ? MVT::v4i32 : MVT::v2i32; |
| 5789 | if ((SplatBits & ~0xff) == 0) { |
| 5790 | // Value = 0x000000nn: Op=x, Cmode=000x. |
| 5791 | OpCmode = 0; |
| 5792 | Imm = SplatBits; |
| 5793 | break; |
| 5794 | } |
| 5795 | if ((SplatBits & ~0xff00) == 0) { |
| 5796 | // Value = 0x0000nn00: Op=x, Cmode=001x. |
| 5797 | OpCmode = 0x2; |
| 5798 | Imm = SplatBits >> 8; |
| 5799 | break; |
| 5800 | } |
| 5801 | if ((SplatBits & ~0xff0000) == 0) { |
| 5802 | // Value = 0x00nn0000: Op=x, Cmode=010x. |
| 5803 | OpCmode = 0x4; |
| 5804 | Imm = SplatBits >> 16; |
| 5805 | break; |
| 5806 | } |
| 5807 | if ((SplatBits & ~0xff000000) == 0) { |
| 5808 | // Value = 0xnn000000: Op=x, Cmode=011x. |
| 5809 | OpCmode = 0x6; |
| 5810 | Imm = SplatBits >> 24; |
| 5811 | break; |
| 5812 | } |
| 5813 | |
| 5814 | // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC |
| 5815 | if (type == OtherModImm) return SDValue(); |
| 5816 | |
| 5817 | if ((SplatBits & ~0xffff) == 0 && |
| 5818 | ((SplatBits | SplatUndef) & 0xff) == 0xff) { |
| 5819 | // Value = 0x0000nnff: Op=x, Cmode=1100. |
| 5820 | OpCmode = 0xc; |
| 5821 | Imm = SplatBits >> 8; |
| 5822 | break; |
| 5823 | } |
| 5824 | |
| 5825 | if ((SplatBits & ~0xffffff) == 0 && |
| 5826 | ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { |
| 5827 | // Value = 0x00nnffff: Op=x, Cmode=1101. |
| 5828 | OpCmode = 0xd; |
| 5829 | Imm = SplatBits >> 16; |
| 5830 | break; |
| 5831 | } |
| 5832 | |
| 5833 | // Note: there are a few 32-bit splat values (specifically: 00ffff00, |
| 5834 | // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not |
| 5835 | // VMOV.I32. A (very) minor optimization would be to replicate the value |
| 5836 | // and fall through here to test for a valid 64-bit splat. But, then the |
| 5837 | // caller would also need to check and handle the change in size. |
| 5838 | return SDValue(); |
| 5839 | |
| 5840 | case 64: { |
| 5841 | if (type != VMOVModImm) |
| 5842 | return SDValue(); |
| 5843 | // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. |
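// For example, the splat value 0xff000000ff0000ff sets Imm bits 7, 3 and 0
// (one bit per 0xff byte, starting from the least significant byte), giving
// Imm = 0x89.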
| 5844 | uint64_t BitMask = 0xff; |
| 5845 | uint64_t Val = 0; |
| 5846 | unsigned ImmMask = 1; |
| 5847 | Imm = 0; |
| 5848 | for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { |
| 5849 | if (((SplatBits | SplatUndef) & BitMask) == BitMask) { |
| 5850 | Val |= BitMask; |
| 5851 | Imm |= ImmMask; |
| 5852 | } else if ((SplatBits & BitMask) != 0) { |
| 5853 | return SDValue(); |
| 5854 | } |
| 5855 | BitMask <<= 8; |
| 5856 | ImmMask <<= 1; |
| 5857 | } |
| 5858 | |
| 5859 | if (DAG.getDataLayout().isBigEndian()) |
// Swap the higher and lower 32-bit words.
| 5861 | Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4); |
| 5862 | |
| 5863 | // Op=1, Cmode=1110. |
| 5864 | OpCmode = 0x1e; |
| 5865 | VT = is128Bits ? MVT::v2i64 : MVT::v1i64; |
| 5866 | break; |
| 5867 | } |
| 5868 | |
| 5869 | default: |
| 5870 | llvm_unreachable("unexpected size for isNEONModifiedImm" ); |
| 5871 | } |
| 5872 | |
| 5873 | unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); |
| 5874 | return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); |
| 5875 | } |
| 5876 | |
| 5877 | SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, |
| 5878 | const ARMSubtarget *ST) const { |
| 5879 | EVT VT = Op.getValueType(); |
| 5880 | bool IsDouble = (VT == MVT::f64); |
| 5881 | ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); |
| 5882 | const APFloat &FPVal = CFP->getValueAPF(); |
| 5883 | |
| 5884 | // Prevent floating-point constants from using literal loads |
| 5885 | // when execute-only is enabled. |
| 5886 | if (ST->genExecuteOnly()) { |
| 5887 | // If we can represent the constant as an immediate, don't lower it |
| 5888 | if (isFPImmLegal(FPVal, VT)) |
| 5889 | return Op; |
| 5890 | // Otherwise, construct as integer, and move to float register |
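// For example, an f32 constant such as 0.1f is not encodable as an FP
// immediate, so it would be materialized as the integer 0x3dcccccd and
// transferred with VMOVSR.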
| 5891 | APInt INTVal = FPVal.bitcastToAPInt(); |
| 5892 | SDLoc DL(CFP); |
| 5893 | switch (VT.getSimpleVT().SimpleTy) { |
| 5894 | default: |
| 5895 | llvm_unreachable("Unknown floating point type!" ); |
| 5896 | break; |
| 5897 | case MVT::f64: { |
| 5898 | SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32); |
| 5899 | SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32); |
| 5900 | if (!ST->isLittle()) |
| 5901 | std::swap(Lo, Hi); |
| 5902 | return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi); |
| 5903 | } |
| 5904 | case MVT::f32: |
| 5905 | return DAG.getNode(ARMISD::VMOVSR, DL, VT, |
| 5906 | DAG.getConstant(INTVal, DL, MVT::i32)); |
| 5907 | } |
| 5908 | } |
| 5909 | |
| 5910 | if (!ST->hasVFP3Base()) |
| 5911 | return SDValue(); |
| 5912 | |
| 5913 | // Use the default (constant pool) lowering for double constants when we have |
| 5914 | // an SP-only FPU |
| 5915 | if (IsDouble && !Subtarget->hasFP64()) |
| 5916 | return SDValue(); |
| 5917 | |
| 5918 | // Try splatting with a VMOV.f32... |
| 5919 | int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); |
| 5920 | |
| 5921 | if (ImmVal != -1) { |
| 5922 | if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { |
| 5923 | // We have code in place to select a valid ConstantFP already, no need to |
| 5924 | // do any mangling. |
| 5925 | return Op; |
| 5926 | } |
| 5927 | |
| 5928 | // It's a float and we are trying to use NEON operations where |
| 5929 | // possible. Lower it to a splat followed by an extract. |
| 5930 | SDLoc DL(Op); |
| 5931 | SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); |
| 5932 | SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, |
| 5933 | NewVal); |
| 5934 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, |
| 5935 | DAG.getConstant(0, DL, MVT::i32)); |
| 5936 | } |
| 5937 | |
// The rest of our options are NEON only; make sure that's allowed before
// proceeding.
| 5940 | if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) |
| 5941 | return SDValue(); |
| 5942 | |
| 5943 | EVT VMovVT; |
| 5944 | uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); |
| 5945 | |
| 5946 | // It wouldn't really be worth bothering for doubles except for one very |
| 5947 | // important value, which does happen to match: 0.0. So make sure we don't do |
| 5948 | // anything stupid. |
| 5949 | if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) |
| 5950 | return SDValue(); |
| 5951 | |
| 5952 | // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). |
| 5953 | SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), |
| 5954 | VMovVT, false, VMOVModImm); |
| 5955 | if (NewVal != SDValue()) { |
| 5956 | SDLoc DL(Op); |
| 5957 | SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, |
| 5958 | NewVal); |
| 5959 | if (IsDouble) |
| 5960 | return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); |
| 5961 | |
| 5962 | // It's a float: cast and extract a vector element. |
| 5963 | SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, |
| 5964 | VecConstant); |
| 5965 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, |
| 5966 | DAG.getConstant(0, DL, MVT::i32)); |
| 5967 | } |
| 5968 | |
| 5969 | // Finally, try a VMVN.i32 |
| 5970 | NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT, |
| 5971 | false, VMVNModImm); |
| 5972 | if (NewVal != SDValue()) { |
| 5973 | SDLoc DL(Op); |
| 5974 | SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); |
| 5975 | |
| 5976 | if (IsDouble) |
| 5977 | return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); |
| 5978 | |
| 5979 | // It's a float: cast and extract a vector element. |
| 5980 | SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, |
| 5981 | VecConstant); |
| 5982 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, |
| 5983 | DAG.getConstant(0, DL, MVT::i32)); |
| 5984 | } |
| 5985 | |
| 5986 | return SDValue(); |
| 5987 | } |
| 5988 | |
// Check if a VEXT instruction can handle the shuffle mask when the
| 5990 | // vector sources of the shuffle are the same. |
| 5991 | static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { |
| 5992 | unsigned NumElts = VT.getVectorNumElements(); |
| 5993 | |
| 5994 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 5995 | if (M[0] < 0) |
| 5996 | return false; |
| 5997 | |
| 5998 | Imm = M[0]; |
| 5999 | |
| 6000 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 6001 | // element. The other shuffle indices must be the successive elements after |
| 6002 | // the first one. |
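// For example, for v8i8 the mask <3, 4, 5, 6, 7, 0, 1, 2> is a singleton
// VEXT with Imm = 3: the expected indices wrap around past NumElts because
// both sources are the same vector.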
| 6003 | unsigned ExpectedElt = Imm; |
| 6004 | for (unsigned i = 1; i < NumElts; ++i) { |
| 6005 | // Increment the expected index. If it wraps around, just follow it |
| 6006 | // back to index zero and keep going. |
| 6007 | ++ExpectedElt; |
| 6008 | if (ExpectedElt == NumElts) |
| 6009 | ExpectedElt = 0; |
| 6010 | |
| 6011 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 6012 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 6013 | return false; |
| 6014 | } |
| 6015 | |
| 6016 | return true; |
| 6017 | } |
| 6018 | |
| 6019 | static bool isVEXTMask(ArrayRef<int> M, EVT VT, |
| 6020 | bool &ReverseVEXT, unsigned &Imm) { |
| 6021 | unsigned NumElts = VT.getVectorNumElements(); |
| 6022 | ReverseVEXT = false; |
| 6023 | |
| 6024 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 6025 | if (M[0] < 0) |
| 6026 | return false; |
| 6027 | |
| 6028 | Imm = M[0]; |
| 6029 | |
| 6030 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 6031 | // element. The other shuffle indices must be the successive elements after |
| 6032 | // the first one. |
| 6033 | unsigned ExpectedElt = Imm; |
| 6034 | for (unsigned i = 1; i < NumElts; ++i) { |
| 6035 | // Increment the expected index. If it wraps around, it may still be |
| 6036 | // a VEXT but the source vectors must be swapped. |
| 6037 | ExpectedElt += 1; |
| 6038 | if (ExpectedElt == NumElts * 2) { |
| 6039 | ExpectedElt = 0; |
| 6040 | ReverseVEXT = true; |
| 6041 | } |
| 6042 | |
| 6043 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 6044 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 6045 | return false; |
| 6046 | } |
| 6047 | |
| 6048 | // Adjust the index value if the source operands will be swapped. |
| 6049 | if (ReverseVEXT) |
| 6050 | Imm -= NumElts; |
| 6051 | |
| 6052 | return true; |
| 6053 | } |
| 6054 | |
| 6055 | /// isVREVMask - Check if a vector shuffle corresponds to a VREV |
| 6056 | /// instruction with the specified blocksize. (The order of the elements |
| 6057 | /// within each block of the vector is reversed.) |
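/// For example, for v8i8 with BlockSize == 32, <3, 2, 1, 0, 7, 6, 5, 4> is a
/// valid VREV32 mask: each 32-bit block of four i8 elements is reversed.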
| 6058 | static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { |
| 6059 | assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && |
| 6060 | "Only possible block sizes for VREV are: 16, 32, 64" ); |
| 6061 | |
| 6062 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 6063 | if (EltSz == 64) |
| 6064 | return false; |
| 6065 | |
| 6066 | unsigned NumElts = VT.getVectorNumElements(); |
| 6067 | unsigned BlockElts = M[0] + 1; |
| 6068 | // If the first shuffle index is UNDEF, be optimistic. |
| 6069 | if (M[0] < 0) |
| 6070 | BlockElts = BlockSize / EltSz; |
| 6071 | |
| 6072 | if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) |
| 6073 | return false; |
| 6074 | |
| 6075 | for (unsigned i = 0; i < NumElts; ++i) { |
| 6076 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 6077 | if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) |
| 6078 | return false; |
| 6079 | } |
| 6080 | |
| 6081 | return true; |
| 6082 | } |
| 6083 | |
| 6084 | static bool isVTBLMask(ArrayRef<int> M, EVT VT) { |
| 6085 | // We can handle <8 x i8> vector shuffles. If the index in the mask is out of |
| 6086 | // range, then 0 is placed into the resulting vector. So pretty much any mask |
| 6087 | // of 8 elements can work here. |
| 6088 | return VT == MVT::v8i8 && M.size() == 8; |
| 6089 | } |
| 6090 | |
| 6091 | static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, |
| 6092 | unsigned Index) { |
| 6093 | if (Mask.size() == Elements * 2) |
| 6094 | return Index / Elements; |
| 6095 | return Mask[Index] == 0 ? 0 : 1; |
| 6096 | } |
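|      | 
|      | // For illustration: a v4i32 VTRN has per-result masks [0, 4, 2, 6] (first
|      | // element 0, so SelectPairHalf returns 0) and [1, 5, 3, 7] (first element
|      | // nonzero, so it returns 1). For a combined two-result mask of length
|      | // 2 * Elements, the half is instead chosen by position, Index / Elements.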
| 6097 | |
| 6098 | // Checks whether the shuffle mask represents a vector transpose (VTRN) by |
| 6099 | // checking that pairs of elements in the shuffle mask represent the same index |
| 6100 | // in each vector, incrementing the expected index by 2 at each step. |
| 6101 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] |
| 6102 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} |
| 6103 | // v2={e,f,g,h} |
| 6104 | // WhichResult gives the offset for each element in the mask based on which |
| 6105 | // of the two results it belongs to. |
| 6106 | // |
| 6107 | // The transpose can be represented either as: |
| 6108 | // result1 = shufflevector v1, v2, result1_shuffle_mask |
| 6109 | // result2 = shufflevector v1, v2, result2_shuffle_mask |
| 6110 | // where v1/v2 and the shuffle masks have the same number of elements |
| 6111 | // (here WhichResult (see below) indicates which result is being checked) |
| 6112 | // |
| 6113 | // or as: |
| 6114 | // results = shufflevector v1, v2, shuffle_mask |
| 6115 | // where both results are returned in one vector and the shuffle mask has twice |
| 6116 | // as many elements as v1/v2 (here WhichResult will always be 0 if true); here
| 6117 | // we want to check the low half and high half of the shuffle mask as if it
| 6118 | // were the other case.
| 6119 | static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 6120 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 6121 | if (EltSz == 64) |
| 6122 | return false; |
| 6123 | |
| 6124 | unsigned NumElts = VT.getVectorNumElements(); |
| 6125 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 6126 | return false; |
| 6127 | |
| 6128 | // If the mask is twice as long as the input vector then we need to check the |
| 6129 | // upper and lower parts of the mask with a matching value for WhichResult |
| 6130 | // FIXME: A mask with only even values will be rejected in case the first |
| 6131 | // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only |
| 6132 | // M[0] is used to determine WhichResult |
| 6133 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 6134 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 6135 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 6136 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 6137 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) |
| 6138 | return false; |
| 6139 | } |
| 6140 | } |
| 6141 | |
| 6142 | if (M.size() == NumElts*2) |
| 6143 | WhichResult = 0; |
| 6144 | |
| 6145 | return true; |
| 6146 | } |
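|      | 
|      | // For illustration: on v4i32, [0, 4, 2, 6] matches with WhichResult == 0 and
|      | // [1, 5, 3, 7] with WhichResult == 1; the combined two-result mask
|      | // [0, 4, 2, 6, 1, 5, 3, 7] also matches, with WhichResult reset to 0.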
| 6147 | |
| 6148 | /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of |
| 6149 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 6150 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. |
| 6151 | static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 6152 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 6153 | if (EltSz == 64) |
| 6154 | return false; |
| 6155 | |
| 6156 | unsigned NumElts = VT.getVectorNumElements(); |
| 6157 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 6158 | return false; |
| 6159 | |
| 6160 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 6161 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 6162 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 6163 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 6164 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) |
| 6165 | return false; |
| 6166 | } |
| 6167 | } |
| 6168 | |
| 6169 | if (M.size() == NumElts*2) |
| 6170 | WhichResult = 0; |
| 6171 | |
| 6172 | return true; |
| 6173 | } |
| 6174 | |
| 6175 | // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking |
| 6176 | // that the mask elements are either all even and in steps of size 2 or all odd |
| 6177 | // and in steps of size 2. |
| 6178 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] |
| 6179 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} |
| 6180 | // v2={e,f,g,h} |
| 6181 | // Requires checks similar to those of isVTRNMask with respect to how the
| 6182 | // results are returned.
| 6183 | static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 6184 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 6185 | if (EltSz == 64) |
| 6186 | return false; |
| 6187 | |
| 6188 | unsigned NumElts = VT.getVectorNumElements(); |
| 6189 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 6190 | return false; |
| 6191 | |
| 6192 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 6193 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 6194 | for (unsigned j = 0; j < NumElts; ++j) { |
| 6195 | if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) |
| 6196 | return false; |
| 6197 | } |
| 6198 | } |
| 6199 | |
| 6200 | if (M.size() == NumElts*2) |
| 6201 | WhichResult = 0; |
| 6202 | |
| 6203 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 6204 | if (VT.is64BitVector() && EltSz == 32) |
| 6205 | return false; |
| 6206 | |
| 6207 | return true; |
| 6208 | } |
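|      | 
|      | // For illustration: on v8i16, the even-element mask
|      | // [0, 2, 4, 6, 8, 10, 12, 14] matches with WhichResult == 0 and the
|      | // odd-element mask [1, 3, 5, 7, 9, 11, 13, 15] with WhichResult == 1.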
| 6209 | |
| 6210 | /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of |
| 6211 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 6212 | /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
| 6213 | static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 6214 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 6215 | if (EltSz == 64) |
| 6216 | return false; |
| 6217 | |
| 6218 | unsigned NumElts = VT.getVectorNumElements(); |
| 6219 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 6220 | return false; |
| 6221 | |
| 6222 | unsigned Half = NumElts / 2; |
| 6223 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 6224 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 6225 | for (unsigned j = 0; j < NumElts; j += Half) { |
| 6226 | unsigned Idx = WhichResult; |
| 6227 | for (unsigned k = 0; k < Half; ++k) { |
| 6228 | int MIdx = M[i + j + k]; |
| 6229 | if (MIdx >= 0 && (unsigned) MIdx != Idx) |
| 6230 | return false; |
| 6231 | Idx += 2; |
| 6232 | } |
| 6233 | } |
| 6234 | } |
| 6235 | |
| 6236 | if (M.size() == NumElts*2) |
| 6237 | WhichResult = 0; |
| 6238 | |
| 6239 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 6240 | if (VT.is64BitVector() && EltSz == 32) |
| 6241 | return false; |
| 6242 | |
| 6243 | return true; |
| 6244 | } |
| 6245 | |
| 6246 | // Checks whether the shuffle mask represents a vector zip (VZIP) by checking |
| 6247 | // that pairs of elements of the shufflemask represent the same index in each |
| 6248 | // vector incrementing sequentially through the vectors. |
| 6249 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] |
| 6250 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} |
| 6251 | // v2={e,f,g,h} |
| 6252 | // Requires checks similar to those of isVTRNMask with respect to how the
| 6253 | // results are returned.
| 6254 | static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 6255 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 6256 | if (EltSz == 64) |
| 6257 | return false; |
| 6258 | |
| 6259 | unsigned NumElts = VT.getVectorNumElements(); |
| 6260 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 6261 | return false; |
| 6262 | |
| 6263 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 6264 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 6265 | unsigned Idx = WhichResult * NumElts / 2; |
| 6266 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 6267 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 6268 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) |
| 6269 | return false; |
| 6270 | Idx += 1; |
| 6271 | } |
| 6272 | } |
| 6273 | |
| 6274 | if (M.size() == NumElts*2) |
| 6275 | WhichResult = 0; |
| 6276 | |
| 6277 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 6278 | if (VT.is64BitVector() && EltSz == 32) |
| 6279 | return false; |
| 6280 | |
| 6281 | return true; |
| 6282 | } |
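|      | 
|      | // For illustration: on v8i8, [0, 8, 1, 9, 2, 10, 3, 11] interleaves the low
|      | // halves of the two vectors and matches with WhichResult == 0; the high-half
|      | // interleave [4, 12, 5, 13, 6, 14, 7, 15] matches with WhichResult == 1.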
| 6283 | |
| 6284 | /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of |
| 6285 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 6286 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. |
| 6287 | static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 6288 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 6289 | if (EltSz == 64) |
| 6290 | return false; |
| 6291 | |
| 6292 | unsigned NumElts = VT.getVectorNumElements(); |
| 6293 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 6294 | return false; |
| 6295 | |
| 6296 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 6297 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 6298 | unsigned Idx = WhichResult * NumElts / 2; |
| 6299 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 6300 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 6301 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) |
| 6302 | return false; |
| 6303 | Idx += 1; |
| 6304 | } |
| 6305 | } |
| 6306 | |
| 6307 | if (M.size() == NumElts*2) |
| 6308 | WhichResult = 0; |
| 6309 | |
| 6310 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 6311 | if (VT.is64BitVector() && EltSz == 32) |
| 6312 | return false; |
| 6313 | |
| 6314 | return true; |
| 6315 | } |
| 6316 | |
| 6317 | /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), |
| 6318 | /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. |
| 6319 | static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, |
| 6320 | unsigned &WhichResult, |
| 6321 | bool &isV_UNDEF) { |
| 6322 | isV_UNDEF = false; |
| 6323 | if (isVTRNMask(ShuffleMask, VT, WhichResult)) |
| 6324 | return ARMISD::VTRN; |
| 6325 | if (isVUZPMask(ShuffleMask, VT, WhichResult)) |
| 6326 | return ARMISD::VUZP; |
| 6327 | if (isVZIPMask(ShuffleMask, VT, WhichResult)) |
| 6328 | return ARMISD::VZIP; |
| 6329 | |
| 6330 | isV_UNDEF = true; |
| 6331 | if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) |
| 6332 | return ARMISD::VTRN; |
| 6333 | if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) |
| 6334 | return ARMISD::VUZP; |
| 6335 | if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) |
| 6336 | return ARMISD::VZIP; |
| 6337 | |
| 6338 | return 0; |
| 6339 | } |
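|      | 
|      | // For illustration: on v4i32 the mask <0, 0, 2, 2> references only the first
|      | // operand, so none of the two-operand checks match; isVTRN_v_undef_Mask
|      | // accepts it and the function returns ARMISD::VTRN with isV_UNDEF == true.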
| 6340 | |
| 6341 | /// \return true if this is a reverse operation on a vector.
| 6342 | static bool isReverseMask(ArrayRef<int> M, EVT VT) { |
| 6343 | unsigned NumElts = VT.getVectorNumElements(); |
| 6344 | // Make sure the mask has the right size. |
| 6345 | if (NumElts != M.size()) |
| 6346 | return false; |
| 6347 | |
| 6348 | // Look for <15, ..., 3, -1, 1, 0>. |
| 6349 | for (unsigned i = 0; i != NumElts; ++i) |
| 6350 | if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) |
| 6351 | return false; |
| 6352 | |
| 6353 | return true; |
| 6354 | } |
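|      | 
|      | // For illustration: on v8i16 this accepts <7, 6, 5, 4, 3, 2, 1, 0> as well as
|      | // partially-undef variants such as <7, 6, -1, 4, 3, -1, 1, 0>.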
| 6355 | |
| 6356 | // If N is an integer constant that can be moved into a register in one |
| 6357 | // instruction, return an SDValue of such a constant (will become a MOV |
| 6358 | // instruction). Otherwise return null. |
| 6359 | static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, |
| 6360 | const ARMSubtarget *ST, const SDLoc &dl) { |
| 6361 | uint64_t Val; |
| 6362 | if (!isa<ConstantSDNode>(N)) |
| 6363 | return SDValue(); |
| 6364 | Val = cast<ConstantSDNode>(N)->getZExtValue(); |
| 6365 | |
| 6366 | if (ST->isThumb1Only()) { |
| 6367 | if (Val <= 255 || ~Val <= 255) |
| 6368 | return DAG.getConstant(Val, dl, MVT::i32); |
| 6369 | } else { |
| 6370 | if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) |
| 6371 | return DAG.getConstant(Val, dl, MVT::i32); |
| 6372 | } |
| 6373 | return SDValue(); |
| 6374 | } |
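|      | 
|      | // For illustration: in Thumb1 mode a plain 8-bit MOV immediate (0..255)
|      | // qualifies. Otherwise any rotated 8-bit immediate qualifies, e.g. 0x00FF0000,
|      | // as does any value whose complement is a rotated 8-bit immediate, e.g.
|      | // 0xFF00FFFF (materialized with MVN).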
| 6375 | |
| 6376 | // If this is a case we can't handle, return null and let the default |
| 6377 | // expansion code take care of it. |
| 6378 | SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, |
| 6379 | const ARMSubtarget *ST) const { |
| 6380 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); |
| 6381 | SDLoc dl(Op); |
| 6382 | EVT VT = Op.getValueType(); |
| 6383 | |
| 6384 | APInt SplatBits, SplatUndef; |
| 6385 | unsigned SplatBitSize; |
| 6386 | bool HasAnyUndefs; |
| 6387 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 6388 | if (SplatUndef.isAllOnesValue()) |
| 6389 | return DAG.getUNDEF(VT); |
| 6390 | |
| 6391 | if (SplatBitSize <= 64) { |
| 6392 | // Check if an immediate VMOV works. |
| 6393 | EVT VmovVT; |
| 6394 | SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), |
| 6395 | SplatUndef.getZExtValue(), SplatBitSize, |
| 6396 | DAG, dl, VmovVT, VT.is128BitVector(), |
| 6397 | VMOVModImm); |
| 6398 | if (Val.getNode()) { |
| 6399 | SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); |
| 6400 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); |
| 6401 | } |
| 6402 | |
| 6403 | // Try an immediate VMVN. |
| 6404 | uint64_t NegatedImm = (~SplatBits).getZExtValue(); |
| 6405 | Val = isNEONModifiedImm(NegatedImm, |
| 6406 | SplatUndef.getZExtValue(), SplatBitSize, |
| 6407 | DAG, dl, VmovVT, VT.is128BitVector(), |
| 6408 | VMVNModImm); |
| 6409 | if (Val.getNode()) { |
| 6410 | SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); |
| 6411 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); |
| 6412 | } |
| 6413 | |
| 6414 | // Use vmov.f32 to materialize other v2f32 and v4f32 splats. |
| 6415 | if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { |
| 6416 | int ImmVal = ARM_AM::getFP32Imm(SplatBits); |
| 6417 | if (ImmVal != -1) { |
| 6418 | SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32); |
| 6419 | return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); |
| 6420 | } |
| 6421 | } |
| 6422 | } |
| 6423 | } |
| 6424 | |
| 6425 | // Scan through the operands to see if only one value is used. |
| 6426 | // |
| 6427 | // As an optimisation, even if more than one value is used it may be more |
| 6428 | // profitable to splat with one value and then change some lanes.
| 6429 | // |
| 6430 | // Heuristically we decide to do this if the vector has a "dominant" value, |
| 6431 | // defined as splatted to more than half of the lanes. |
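|      |   // For illustration: in a v4i32 BUILD_VECTOR <a, a, a, b>, 'a' occupies three
|      |   // of the four lanes and is therefore dominant: we can VDUP 'a' and then
|      |   // patch lane 3 with an INSERT_VECTOR_ELT of 'b'.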
| 6432 | unsigned NumElts = VT.getVectorNumElements(); |
| 6433 | bool isOnlyLowElement = true; |
| 6434 | bool usesOnlyOneValue = true; |
| 6435 | bool hasDominantValue = false; |
| 6436 | bool isConstant = true; |
| 6437 | |
| 6438 | // Map of the number of times a particular SDValue appears in the |
| 6439 | // element list. |
| 6440 | DenseMap<SDValue, unsigned> ValueCounts; |
| 6441 | SDValue Value; |
| 6442 | for (unsigned i = 0; i < NumElts; ++i) { |
| 6443 | SDValue V = Op.getOperand(i); |
| 6444 | if (V.isUndef()) |
| 6445 | continue; |
| 6446 | if (i > 0) |
| 6447 | isOnlyLowElement = false; |
| 6448 | if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) |
| 6449 | isConstant = false; |
| 6450 | |
| 6451 | ValueCounts.insert(std::make_pair(V, 0)); |
| 6452 | unsigned &Count = ValueCounts[V]; |
| 6453 | |
| 6454 | // Is this value dominant? (takes up more than half of the lanes) |
| 6455 | if (++Count > (NumElts / 2)) { |
| 6456 | hasDominantValue = true; |
| 6457 | Value = V; |
| 6458 | } |
| 6459 | } |
| 6460 | if (ValueCounts.size() != 1) |
| 6461 | usesOnlyOneValue = false; |
| 6462 | if (!Value.getNode() && !ValueCounts.empty()) |
| 6463 | Value = ValueCounts.begin()->first; |
| 6464 | |
| 6465 | if (ValueCounts.empty()) |
| 6466 | return DAG.getUNDEF(VT); |
| 6467 | |
| 6468 | // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. |
| 6469 | // Keep going if we are hitting this case. |
| 6470 | if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) |
| 6471 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); |
| 6472 | |
| 6473 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 6474 | |
| 6475 | // Use VDUP for non-constant splats. For f32 constant splats, reduce to |
| 6476 | // i32 and try again. |
| 6477 | if (hasDominantValue && EltSize <= 32) { |
| 6478 | if (!isConstant) { |
| 6479 | SDValue N; |
| 6480 | |
| 6481 | // If we are VDUPing a value that comes directly from a vector, that will |
| 6482 | // cause an unnecessary move to and from a GPR, where instead we could |
| 6483 | // just use VDUPLANE. We can only do this if the lane being extracted |
| 6484 | // is at a constant index, as the VDUP from lane instructions only have |
| 6485 | // constant-index forms. |
| 6486 | ConstantSDNode *constIndex; |
| 6487 | if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 6488 | (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) { |
| 6489 | // We need to create a new undef vector to use for the VDUPLANE if the |
| 6490 | // size of the vector from which we get the value is different than the |
| 6491 | // size of the vector that we need to create. We will insert the element |
| 6492 | // such that the register coalescer will remove unnecessary copies. |
| 6493 | if (VT != Value->getOperand(0).getValueType()) { |
| 6494 | unsigned index = constIndex->getAPIntValue().getLimitedValue() % |
| 6495 | VT.getVectorNumElements(); |
| 6496 | N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, |
| 6497 | DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), |
| 6498 | Value, DAG.getConstant(index, dl, MVT::i32)), |
| 6499 | DAG.getConstant(index, dl, MVT::i32)); |
| 6500 | } else |
| 6501 | N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, |
| 6502 | Value->getOperand(0), Value->getOperand(1)); |
| 6503 | } else |
| 6504 | N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); |
| 6505 | |
| 6506 | if (!usesOnlyOneValue) { |
| 6507 | // The dominant value was splatted as 'N', but we now have to insert |
| 6508 | // all differing elements. |
| 6509 | for (unsigned I = 0; I < NumElts; ++I) { |
| 6510 | if (Op.getOperand(I) == Value) |
| 6511 | continue; |
| 6512 | SmallVector<SDValue, 3> Ops; |
| 6513 | Ops.push_back(N); |
| 6514 | Ops.push_back(Op.getOperand(I)); |
| 6515 | Ops.push_back(DAG.getConstant(I, dl, MVT::i32)); |
| 6516 | N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops); |
| 6517 | } |
| 6518 | } |
| 6519 | return N; |
| 6520 | } |
| 6521 | if (VT.getVectorElementType().isFloatingPoint()) { |
| 6522 | SmallVector<SDValue, 8> Ops; |
| 6523 | for (unsigned i = 0; i < NumElts; ++i) |
| 6524 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, |
| 6525 | Op.getOperand(i))); |
| 6526 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); |
| 6527 | SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); |
| 6528 | Val = LowerBUILD_VECTOR(Val, DAG, ST); |
| 6529 | if (Val.getNode()) |
| 6530 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); |
| 6531 | } |
| 6532 | if (usesOnlyOneValue) { |
| 6533 | SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); |
| 6534 | if (isConstant && Val.getNode()) |
| 6535 | return DAG.getNode(ARMISD::VDUP, dl, VT, Val); |
| 6536 | } |
| 6537 | } |
| 6538 | |
| 6539 | // If all elements are constants and the case above didn't get hit, fall back |
| 6540 | // to the default expansion, which will generate a load from the constant |
| 6541 | // pool. |
| 6542 | if (isConstant) |
| 6543 | return SDValue(); |
| 6544 | |
| 6545 | // Empirical tests suggest this is rarely worth it for vectors of length <= 2. |
| 6546 | if (NumElts >= 4) { |
| 6547 | SDValue shuffle = ReconstructShuffle(Op, DAG); |
| 6548 | if (shuffle != SDValue()) |
| 6549 | return shuffle; |
| 6550 | } |
| 6551 | |
| 6552 | if (VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) { |
| 6553 | // If we haven't found an efficient lowering, try splitting a 128-bit vector |
| 6554 | // into two 64-bit vectors; we might discover a better way to lower it. |
| 6555 | SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); |
| 6556 | EVT ExtVT = VT.getVectorElementType(); |
| 6557 | EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2); |
| 6558 | SDValue Lower = |
| 6559 | DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2)); |
| 6560 | if (Lower.getOpcode() == ISD::BUILD_VECTOR) |
| 6561 | Lower = LowerBUILD_VECTOR(Lower, DAG, ST); |
| 6562 | SDValue Upper = DAG.getBuildVector( |
| 6563 | HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2)); |
| 6564 | if (Upper.getOpcode() == ISD::BUILD_VECTOR) |
| 6565 | Upper = LowerBUILD_VECTOR(Upper, DAG, ST); |
| 6566 | if (Lower && Upper) |
| 6567 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper); |
| 6568 | } |
| 6569 | |
| 6570 | // Vectors with 32- or 64-bit elements can be built by directly assigning |
| 6571 | // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands |
| 6572 | // will be legalized. |
| 6573 | if (EltSize >= 32) { |
| 6574 | // Do the expansion with floating-point types, since that is what the VFP |
| 6575 | // registers are defined to use, and since i64 is not legal. |
| 6576 | EVT EltVT = EVT::getFloatingPointVT(EltSize); |
| 6577 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); |
| 6578 | SmallVector<SDValue, 8> Ops; |
| 6579 | for (unsigned i = 0; i < NumElts; ++i) |
| 6580 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); |
| 6581 | SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); |
| 6582 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); |
| 6583 | } |
| 6584 | |
| 6585 | // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we |
| 6586 | // know the default expansion would otherwise fall back on something even |
| 6587 | // worse. For a vector with one or two non-undef values, that's |
| 6588 | // scalar_to_vector for the elements followed by a shuffle (provided the |
| 6589 | // shuffle is valid for the target) and materialization element by element |
| 6590 | // on the stack followed by a load for everything else. |
| 6591 | if (!isConstant && !usesOnlyOneValue) { |
| 6592 | SDValue Vec = DAG.getUNDEF(VT); |
| 6593 | for (unsigned i = 0 ; i < NumElts; ++i) { |
| 6594 | SDValue V = Op.getOperand(i); |
| 6595 | if (V.isUndef()) |
| 6596 | continue; |
| 6597 | SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32); |
| 6598 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); |
| 6599 | } |
| 6600 | return Vec; |
| 6601 | } |
| 6602 | |
| 6603 | return SDValue(); |
| 6604 | } |
| 6605 | |
| 6606 | // Gather data to see if the operation can be modelled as a |
| 6607 | // shuffle in combination with VEXTs. |
| 6608 | SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, |
| 6609 | SelectionDAG &DAG) const { |
| 6610 |   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
| 6611 | SDLoc dl(Op); |
| 6612 | EVT VT = Op.getValueType(); |
| 6613 | unsigned NumElts = VT.getVectorNumElements(); |
| 6614 | |
| 6615 | struct ShuffleSourceInfo { |
| 6616 | SDValue Vec; |
| 6617 | unsigned MinElt = std::numeric_limits<unsigned>::max(); |
| 6618 | unsigned MaxElt = 0; |
| 6619 | |
| 6620 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to |
| 6621 | // be compatible with the shuffle we intend to construct. As a result |
| 6622 | // ShuffleVec will be some sliding window into the original Vec. |
| 6623 | SDValue ShuffleVec; |
| 6624 | |
| 6625 |     // Code should guarantee that element i in Vec starts at element
| 6626 |     // "WindowBase + i * WindowScale" in ShuffleVec.
| 6627 | int WindowBase = 0; |
| 6628 | int WindowScale = 1; |
| 6629 | |
| 6630 | ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} |
| 6631 | |
| 6632 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } |
| 6633 | }; |
| 6634 | |
| 6635 | // First gather all vectors used as an immediate source for this BUILD_VECTOR |
| 6636 | // node. |
| 6637 | SmallVector<ShuffleSourceInfo, 2> Sources; |
| 6638 | for (unsigned i = 0; i < NumElts; ++i) { |
| 6639 | SDValue V = Op.getOperand(i); |
| 6640 | if (V.isUndef()) |
| 6641 | continue; |
| 6642 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { |
| 6643 | // A shuffle can only come from building a vector from various |
| 6644 | // elements of other vectors. |
| 6645 | return SDValue(); |
| 6646 | } else if (!isa<ConstantSDNode>(V.getOperand(1))) { |
| 6647 | // Furthermore, shuffles require a constant mask, whereas extractelts |
| 6648 | // accept variable indices. |
| 6649 | return SDValue(); |
| 6650 | } |
| 6651 | |
| 6652 | // Add this element source to the list if it's not already there. |
| 6653 | SDValue SourceVec = V.getOperand(0); |
| 6654 | auto Source = llvm::find(Sources, SourceVec); |
| 6655 | if (Source == Sources.end()) |
| 6656 | Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); |
| 6657 | |
| 6658 | // Update the minimum and maximum lane number seen. |
| 6659 | unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); |
| 6660 | Source->MinElt = std::min(Source->MinElt, EltNo); |
| 6661 | Source->MaxElt = std::max(Source->MaxElt, EltNo); |
| 6662 | } |
| 6663 | |
| 6664 | // Currently only do something sane when at most two source vectors |
| 6665 | // are involved. |
| 6666 | if (Sources.size() > 2) |
| 6667 | return SDValue(); |
| 6668 | |
| 6669 | // Find out the smallest element size among result and two sources, and use |
| 6670 | // it as element size to build the shuffle_vector. |
| 6671 | EVT SmallestEltTy = VT.getVectorElementType(); |
| 6672 | for (auto &Source : Sources) { |
| 6673 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); |
| 6674 | if (SrcEltTy.bitsLT(SmallestEltTy)) |
| 6675 | SmallestEltTy = SrcEltTy; |
| 6676 | } |
| 6677 | unsigned ResMultiplier = |
| 6678 | VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 6679 | NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 6680 | EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); |
| 6681 | |
| 6682 | // If the source vector is too wide or too narrow, we may nevertheless be able |
| 6683 | // to construct a compatible shuffle either by concatenating it with UNDEF or |
| 6684 | // extracting a suitable range of elements. |
| 6685 | for (auto &Src : Sources) { |
| 6686 | EVT SrcVT = Src.ShuffleVec.getValueType(); |
| 6687 | |
| 6688 | if (SrcVT.getSizeInBits() == VT.getSizeInBits()) |
| 6689 | continue; |
| 6690 | |
| 6691 | // This stage of the search produces a source with the same element type as |
| 6692 | // the original, but with a total width matching the BUILD_VECTOR output. |
| 6693 | EVT EltVT = SrcVT.getVectorElementType(); |
| 6694 | unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits(); |
| 6695 | EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); |
| 6696 | |
| 6697 | if (SrcVT.getSizeInBits() < VT.getSizeInBits()) { |
| 6698 | if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits()) |
| 6699 | return SDValue(); |
| 6700 | // We can pad out the smaller vector for free, so if it's part of a |
| 6701 | // shuffle... |
| 6702 | Src.ShuffleVec = |
| 6703 | DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, |
| 6704 | DAG.getUNDEF(Src.ShuffleVec.getValueType())); |
| 6705 | continue; |
| 6706 | } |
| 6707 | |
| 6708 | if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits()) |
| 6709 | return SDValue(); |
| 6710 | |
| 6711 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { |
| 6712 | // Span too large for a VEXT to cope |
| 6713 | return SDValue(); |
| 6714 | } |
| 6715 | |
| 6716 | if (Src.MinElt >= NumSrcElts) { |
| 6717 | // The extraction can just take the second half |
| 6718 | Src.ShuffleVec = |
| 6719 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
| 6720 | DAG.getConstant(NumSrcElts, dl, MVT::i32)); |
| 6721 | Src.WindowBase = -NumSrcElts; |
| 6722 | } else if (Src.MaxElt < NumSrcElts) { |
| 6723 | // The extraction can just take the first half |
| 6724 | Src.ShuffleVec = |
| 6725 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
| 6726 | DAG.getConstant(0, dl, MVT::i32)); |
| 6727 | } else { |
| 6728 | // An actual VEXT is needed |
| 6729 | SDValue VEXTSrc1 = |
| 6730 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
| 6731 | DAG.getConstant(0, dl, MVT::i32)); |
| 6732 | SDValue VEXTSrc2 = |
| 6733 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
| 6734 | DAG.getConstant(NumSrcElts, dl, MVT::i32)); |
| 6735 | |
| 6736 | Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1, |
| 6737 | VEXTSrc2, |
| 6738 | DAG.getConstant(Src.MinElt, dl, MVT::i32)); |
| 6739 | Src.WindowBase = -Src.MinElt; |
| 6740 | } |
| 6741 | } |
| 6742 | |
| 6743 | // Another possible incompatibility occurs from the vector element types. We |
| 6744 | // can fix this by bitcasting the source vectors to the same type we intend |
| 6745 | // for the shuffle. |
| 6746 | for (auto &Src : Sources) { |
| 6747 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); |
| 6748 | if (SrcEltTy == SmallestEltTy) |
| 6749 | continue; |
| 6750 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); |
| 6751 | Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec); |
| 6752 | Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 6753 | Src.WindowBase *= Src.WindowScale; |
| 6754 | } |
| 6755 | |
| 6756 | // Final sanity check before we try to actually produce a shuffle. |
| 6757 |   LLVM_DEBUG(for (auto Src : Sources)
| 6758 |                  assert(Src.ShuffleVec.getValueType() == ShuffleVT););
| 6760 | |
| 6761 | // The stars all align, our next step is to produce the mask for the shuffle. |
| 6762 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); |
| 6763 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); |
| 6764 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { |
| 6765 | SDValue Entry = Op.getOperand(i); |
| 6766 | if (Entry.isUndef()) |
| 6767 | continue; |
| 6768 | |
| 6769 | auto Src = llvm::find(Sources, Entry.getOperand(0)); |
| 6770 | int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); |
| 6771 | |
| 6772 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit |
| 6773 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this |
| 6774 | // segment. |
| 6775 | EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); |
| 6776 | int BitsDefined = std::min(OrigEltTy.getSizeInBits(), |
| 6777 | VT.getScalarSizeInBits()); |
| 6778 | int LanesDefined = BitsDefined / BitsPerShuffleLane; |
| 6779 | |
| 6780 | // This source is expected to fill ResMultiplier lanes of the final shuffle, |
| 6781 | // starting at the appropriate offset. |
| 6782 | int *LaneMask = &Mask[i * ResMultiplier]; |
| 6783 | |
| 6784 |     int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
| 6785 | ExtractBase += NumElts * (Src - Sources.begin()); |
| 6786 | for (int j = 0; j < LanesDefined; ++j) |
| 6787 | LaneMask[j] = ExtractBase + j; |
| 6788 | } |
| 6789 | |
| 6790 | // Final check before we try to produce nonsense... |
| 6791 | if (!isShuffleMaskLegal(Mask, ShuffleVT)) |
| 6792 | return SDValue(); |
| 6793 | |
| 6794 | // We can't handle more than two sources. This should have already |
| 6795 | // been checked before this point. |
| 6796 |   assert(Sources.size() <= 2 && "Too many sources!");
| 6797 | |
| 6798 | SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; |
| 6799 | for (unsigned i = 0; i < Sources.size(); ++i) |
| 6800 | ShuffleOps[i] = Sources[i].ShuffleVec; |
| 6801 | |
| 6802 | SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0], |
| 6803 | ShuffleOps[1], Mask); |
| 6804 | return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); |
| 6805 | } |
| 6806 | |
| 6807 | /// isShuffleMaskLegal - Targets can use this to indicate that they only |
| 6808 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. |
| 6809 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values |
| 6810 | /// are assumed to be legal. |
| 6811 | bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { |
| 6812 | if (VT.getVectorNumElements() == 4 && |
| 6813 | (VT.is128BitVector() || VT.is64BitVector())) { |
| 6814 | unsigned PFIndexes[4]; |
| 6815 | for (unsigned i = 0; i != 4; ++i) { |
| 6816 | if (M[i] < 0) |
| 6817 | PFIndexes[i] = 8; |
| 6818 | else |
| 6819 | PFIndexes[i] = M[i]; |
| 6820 | } |
| 6821 | |
| 6822 | // Compute the index in the perfect shuffle table. |
| 6823 | unsigned PFTableIndex = |
| 6824 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
| 6825 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 6826 | unsigned Cost = (PFEntry >> 30); |
| 6827 | |
| 6828 | if (Cost <= 4) |
| 6829 | return true; |
| 6830 | } |
| 6831 | |
| 6832 | bool ReverseVEXT, isV_UNDEF; |
| 6833 | unsigned Imm, WhichResult; |
| 6834 | |
| 6835 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 6836 | return (EltSize >= 32 || |
| 6837 | ShuffleVectorSDNode::isSplatMask(&M[0], VT) || |
| 6838 | isVREVMask(M, VT, 64) || |
| 6839 | isVREVMask(M, VT, 32) || |
| 6840 | isVREVMask(M, VT, 16) || |
| 6841 | isVEXTMask(M, VT, ReverseVEXT, Imm) || |
| 6842 | isVTBLMask(M, VT) || |
| 6843 | isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) || |
| 6844 | ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT))); |
| 6845 | } |
| 6846 | |
| 6847 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit |
| 6848 | /// the specified operations to build the shuffle. |
| 6849 | static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, |
| 6850 | SDValue RHS, SelectionDAG &DAG, |
| 6851 | const SDLoc &dl) { |
| 6852 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
| 6853 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
| 6854 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
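|      |   // For illustration: each 13-bit operand ID is a base-9 encoding of four lane
|      |   // indices in the range 0-8, where 8 means "undef". The identity LHS shuffle
|      |   // <0, 1, 2, 3> encodes as ((0*9+1)*9+2)*9+3 == 102, the (1*9+2)*9+3 value
|      |   // tested below.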
| 6855 | |
| 6856 | enum { |
| 6857 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> |
| 6858 | OP_VREV, |
| 6859 | OP_VDUP0, |
| 6860 | OP_VDUP1, |
| 6861 | OP_VDUP2, |
| 6862 | OP_VDUP3, |
| 6863 | OP_VEXT1, |
| 6864 | OP_VEXT2, |
| 6865 | OP_VEXT3, |
| 6866 | OP_VUZPL, // VUZP, left result |
| 6867 | OP_VUZPR, // VUZP, right result |
| 6868 | OP_VZIPL, // VZIP, left result |
| 6869 | OP_VZIPR, // VZIP, right result |
| 6870 | OP_VTRNL, // VTRN, left result |
| 6871 | OP_VTRNR // VTRN, right result |
| 6872 | }; |
| 6873 | |
| 6874 | if (OpNum == OP_COPY) { |
| 6875 | if (LHSID == (1*9+2)*9+3) return LHS; |
| 6876 |     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
| 6877 | return RHS; |
| 6878 | } |
| 6879 | |
| 6880 | SDValue OpLHS, OpRHS; |
| 6881 | OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); |
| 6882 | OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); |
| 6883 | EVT VT = OpLHS.getValueType(); |
| 6884 | |
| 6885 | switch (OpNum) { |
| 6886 |   default: llvm_unreachable("Unknown shuffle opcode!");
| 6887 | case OP_VREV: |
| 6888 | // VREV divides the vector in half and swaps within the half. |
| 6889 | if (VT.getVectorElementType() == MVT::i32 || |
| 6890 | VT.getVectorElementType() == MVT::f32) |
| 6891 | return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); |
| 6892 | // vrev <4 x i16> -> VREV32 |
| 6893 | if (VT.getVectorElementType() == MVT::i16) |
| 6894 | return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); |
| 6895 | // vrev <4 x i8> -> VREV16 |
| 6896 | assert(VT.getVectorElementType() == MVT::i8); |
| 6897 | return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); |
| 6898 | case OP_VDUP0: |
| 6899 | case OP_VDUP1: |
| 6900 | case OP_VDUP2: |
| 6901 | case OP_VDUP3: |
| 6902 | return DAG.getNode(ARMISD::VDUPLANE, dl, VT, |
| 6903 | OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32)); |
| 6904 | case OP_VEXT1: |
| 6905 | case OP_VEXT2: |
| 6906 | case OP_VEXT3: |
| 6907 | return DAG.getNode(ARMISD::VEXT, dl, VT, |
| 6908 | OpLHS, OpRHS, |
| 6909 | DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32)); |
| 6910 | case OP_VUZPL: |
| 6911 | case OP_VUZPR: |
| 6912 | return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), |
| 6913 | OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); |
| 6914 | case OP_VZIPL: |
| 6915 | case OP_VZIPR: |
| 6916 | return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), |
| 6917 | OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); |
| 6918 | case OP_VTRNL: |
| 6919 | case OP_VTRNR: |
| 6920 | return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), |
| 6921 | OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); |
| 6922 | } |
| 6923 | } |
| 6924 | |
| 6925 | static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, |
| 6926 | ArrayRef<int> ShuffleMask, |
| 6927 | SelectionDAG &DAG) { |
| 6928 | // Check to see if we can use the VTBL instruction. |
| 6929 | SDValue V1 = Op.getOperand(0); |
| 6930 | SDValue V2 = Op.getOperand(1); |
| 6931 | SDLoc DL(Op); |
| 6932 | |
| 6933 | SmallVector<SDValue, 8> VTBLMask; |
| 6934 | for (ArrayRef<int>::iterator |
| 6935 | I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) |
| 6936 | VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32)); |
| 6937 | |
| 6938 | if (V2.getNode()->isUndef()) |
| 6939 | return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, |
| 6940 | DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); |
| 6941 | |
| 6942 | return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, |
| 6943 | DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); |
| 6944 | } |
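|      | 
|      | // For illustration: lowering the v8i8 mask <0, 8, 1, 9, 2, 10, 3, 11> with a
|      | // defined V2 yields a VTBL2 whose table is {V1, V2}; byte indices 8-15 select
|      | // from V2, and out-of-range indices produce 0, per the VTBL semantics noted
|      | // in isVTBLMask.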
| 6945 | |
| 6946 | static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, |
| 6947 | SelectionDAG &DAG) { |
| 6948 | SDLoc DL(Op); |
| 6949 | SDValue OpLHS = Op.getOperand(0); |
| 6950 | EVT VT = OpLHS.getValueType(); |
| 6951 | |
| 6952 | assert((VT == MVT::v8i16 || VT == MVT::v16i8) && |
| 6953 |          "Expect a v8i16/v16i8 type");
| 6954 | OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS); |
| 6955 |   // For a v16i8 type: after the VREV, we have <7, ..., 0, 15, ..., 8>. Now,
| 6956 | // extract the first 8 bytes into the top double word and the last 8 bytes |
| 6957 | // into the bottom double word. The v8i16 case is similar. |
| 6958 |   unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
| 6959 | return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS, |
| 6960 | DAG.getConstant(ExtractNum, DL, MVT::i32)); |
| 6961 | } |
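|      | 
|      | // For illustration: reversing a v8i16 <e0, ..., e7> first applies VREV64 to
|      | // obtain <e3, e2, e1, e0, e7, e6, e5, e4>, then the VEXT #4 of that value
|      | // with itself swaps the two double words, giving <e7, e6, e5, e4, e3, e2,
|      | // e1, e0>.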
| 6962 | |
| 6963 | static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { |
| 6964 | SDValue V1 = Op.getOperand(0); |
| 6965 | SDValue V2 = Op.getOperand(1); |
| 6966 | SDLoc dl(Op); |
| 6967 | EVT VT = Op.getValueType(); |
| 6968 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); |
| 6969 | |
| 6970 | // Convert shuffles that are directly supported on NEON to target-specific |
| 6971 | // DAG nodes, instead of keeping them as shuffles and matching them again |
| 6972 | // during code selection. This is more efficient and avoids the possibility |
| 6973 | // of inconsistencies between legalization and selection. |
| 6974 | // FIXME: floating-point vectors should be canonicalized to integer vectors |
| 6975 | // of the same type so that they get CSEd properly.
| 6976 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
| 6977 | |
| 6978 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 6979 | if (EltSize <= 32) { |
| 6980 | if (SVN->isSplat()) { |
| 6981 | int Lane = SVN->getSplatIndex(); |
| 6982 | // If this is undef splat, generate it via "just" vdup, if possible. |
| 6983 | if (Lane == -1) Lane = 0; |
| 6984 | |
| 6985 | // Test if V1 is a SCALAR_TO_VECTOR. |
| 6986 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { |
| 6987 | return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); |
| 6988 | } |
| 6989 | // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR |
| 6990 | // (and probably will turn into a SCALAR_TO_VECTOR once legalization |
| 6991 | // reaches it). |
| 6992 | if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && |
| 6993 | !isa<ConstantSDNode>(V1.getOperand(0))) { |
| 6994 | bool IsScalarToVector = true; |
| 6995 | for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) |
| 6996 | if (!V1.getOperand(i).isUndef()) { |
| 6997 | IsScalarToVector = false; |
| 6998 | break; |
| 6999 | } |
| 7000 | if (IsScalarToVector) |
| 7001 | return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); |
| 7002 | } |
| 7003 | return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, |
| 7004 | DAG.getConstant(Lane, dl, MVT::i32)); |
| 7005 | } |
| 7006 | |
| 7007 | bool ReverseVEXT = false; |
| 7008 | unsigned Imm = 0; |
| 7009 | if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { |
| 7010 | if (ReverseVEXT) |
| 7011 | std::swap(V1, V2); |
| 7012 | return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, |
| 7013 | DAG.getConstant(Imm, dl, MVT::i32)); |
| 7014 | } |
| 7015 | |
| 7016 | if (isVREVMask(ShuffleMask, VT, 64)) |
| 7017 | return DAG.getNode(ARMISD::VREV64, dl, VT, V1); |
| 7018 | if (isVREVMask(ShuffleMask, VT, 32)) |
| 7019 | return DAG.getNode(ARMISD::VREV32, dl, VT, V1); |
| 7020 | if (isVREVMask(ShuffleMask, VT, 16)) |
| 7021 | return DAG.getNode(ARMISD::VREV16, dl, VT, V1); |
| 7022 | |
| 7023 | if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) { |
| 7024 | return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, |
| 7025 | DAG.getConstant(Imm, dl, MVT::i32)); |
| 7026 | } |
| 7027 | |
| 7028 | // Check for Neon shuffles that modify both input vectors in place. |
| 7029 | // If both results are used, i.e., if there are two shuffles with the same |
| 7030 | // source operands and with masks corresponding to both results of one of |
| 7031 | // these operations, DAG memoization will ensure that a single node is |
| 7032 | // used for both shuffles. |
| 7033 | unsigned WhichResult = 0; |
| 7034 | bool isV_UNDEF = false; |
| 7035 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 7036 | ShuffleMask, VT, WhichResult, isV_UNDEF)) { |
| 7037 | if (isV_UNDEF) |
| 7038 | V2 = V1; |
| 7039 | return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2) |
| 7040 | .getValue(WhichResult); |
| 7041 | } |
| 7042 | |
| 7043 | // Also check for these shuffles through CONCAT_VECTORS: we canonicalize |
| 7044 | // shuffles that produce a result larger than their operands with: |
| 7045 | // shuffle(concat(v1, undef), concat(v2, undef)) |
| 7046 | // -> |
| 7047 | // shuffle(concat(v1, v2), undef) |
| 7048 | // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). |
| 7049 | // |
| 7050 | // This is useful in the general case, but there are special cases where |
| 7051 | // native shuffles produce larger results: the two-result ops. |
| 7052 | // |
| 7053 | // Look through the concat when lowering them: |
| 7054 | // shuffle(concat(v1, v2), undef) |
| 7055 | // -> |
| 7056 | // concat(VZIP(v1, v2):0, :1) |
| 7057 | // |
| 7058 | if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { |
| 7059 | SDValue SubV1 = V1->getOperand(0); |
| 7060 | SDValue SubV2 = V1->getOperand(1); |
| 7061 | EVT SubVT = SubV1.getValueType(); |
| 7062 | |
| 7063 | // We expect these to have been canonicalized to -1. |
| 7064 | assert(llvm::all_of(ShuffleMask, [&](int i) { |
| 7065 | return i < (int)VT.getVectorNumElements(); |
| 7066 |     }) && "Unexpected shuffle index into UNDEF operand!");
| 7067 | |
| 7068 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 7069 | ShuffleMask, SubVT, WhichResult, isV_UNDEF)) { |
| 7070 | if (isV_UNDEF) |
| 7071 | SubV2 = SubV1; |
| 7072 | assert((WhichResult == 0) && |
| 7073 |              "In-place shuffle of concat can only have one result!");
| 7074 | SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT), |
| 7075 | SubV1, SubV2); |
| 7076 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0), |
| 7077 | Res.getValue(1)); |
| 7078 | } |
| 7079 | } |
| 7080 | } |
| 7081 | |
| 7082 | // If the shuffle is not directly supported and it has 4 elements, use |
| 7083 | // the PerfectShuffle-generated table to synthesize it from other shuffles. |
| 7084 | unsigned NumElts = VT.getVectorNumElements(); |
| 7085 | if (NumElts == 4) { |
| 7086 | unsigned PFIndexes[4]; |
| 7087 | for (unsigned i = 0; i != 4; ++i) { |
| 7088 | if (ShuffleMask[i] < 0) |
| 7089 | PFIndexes[i] = 8; |
| 7090 | else |
| 7091 | PFIndexes[i] = ShuffleMask[i]; |
| 7092 | } |
| 7093 | |
| 7094 | // Compute the index in the perfect shuffle table. |
| 7095 | unsigned PFTableIndex = |
| 7096 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
| 7097 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 7098 | unsigned Cost = (PFEntry >> 30); |
| 7099 | |
| 7100 | if (Cost <= 4) |
| 7101 | return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); |
| 7102 | } |
| 7103 | |
| 7104 | // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. |
| 7105 | if (EltSize >= 32) { |
| 7106 | // Do the expansion with floating-point types, since that is what the VFP |
| 7107 | // registers are defined to use, and since i64 is not legal. |
| 7108 | EVT EltVT = EVT::getFloatingPointVT(EltSize); |
| 7109 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); |
| 7110 | V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); |
| 7111 | V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); |
| 7112 | SmallVector<SDValue, 8> Ops; |
| 7113 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7114 | if (ShuffleMask[i] < 0) |
| 7115 | Ops.push_back(DAG.getUNDEF(EltVT)); |
| 7116 | else |
| 7117 | Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, |
| 7118 | ShuffleMask[i] < (int)NumElts ? V1 : V2, |
| 7119 | DAG.getConstant(ShuffleMask[i] & (NumElts-1), |
| 7120 | dl, MVT::i32))); |
| 7121 | } |
| 7122 | SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); |
| 7123 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); |
| 7124 | } |
| 7125 | |
| 7126 | if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT)) |
| 7127 | return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG); |
| 7128 | |
| 7129 | if (VT == MVT::v8i8) |
| 7130 | if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) |
| 7131 | return NewOp; |
| 7132 | |
| 7133 | return SDValue(); |
| 7134 | } |
| 7135 | |
| 7136 | static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { |
| 7137 | // INSERT_VECTOR_ELT is legal only for immediate indexes. |
| 7138 | SDValue Lane = Op.getOperand(2); |
| 7139 | if (!isa<ConstantSDNode>(Lane)) |
| 7140 | return SDValue(); |
| 7141 | |
| 7142 | return Op; |
| 7143 | } |
| 7144 | |
| 7145 | static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
| 7146 | // EXTRACT_VECTOR_ELT is legal only for immediate indexes. |
| 7147 | SDValue Lane = Op.getOperand(1); |
| 7148 | if (!isa<ConstantSDNode>(Lane)) |
| 7149 | return SDValue(); |
| 7150 | |
| 7151 | SDValue Vec = Op.getOperand(0); |
| 7152 | if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { |
| 7153 | SDLoc dl(Op); |
| 7154 | return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); |
| 7155 | } |
| 7156 | |
| 7157 | return Op; |
| 7158 | } |
| 7159 | |
| 7160 | static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { |
| 7161 | // The only time a CONCAT_VECTORS operation can have legal types is when |
| 7162 | // two 64-bit vectors are concatenated to a 128-bit vector. |
| 7163 | assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && |
| 7164 |          "unexpected CONCAT_VECTORS");
| 7165 | SDLoc dl(Op); |
| 7166 | SDValue Val = DAG.getUNDEF(MVT::v2f64); |
| 7167 | SDValue Op0 = Op.getOperand(0); |
| 7168 | SDValue Op1 = Op.getOperand(1); |
| 7169 | if (!Op0.isUndef()) |
| 7170 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, |
| 7171 | DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), |
| 7172 | DAG.getIntPtrConstant(0, dl)); |
| 7173 | if (!Op1.isUndef()) |
| 7174 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, |
| 7175 | DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), |
| 7176 | DAG.getIntPtrConstant(1, dl)); |
| 7177 | return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); |
| 7178 | } |
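|      | 
|      | // For illustration: concatenating two v8i8 values into a v16i8 bitcasts each
|      | // operand to f64, inserts them into lanes 0 and 1 of a v2f64, and bitcasts
|      | // the result back to v16i8, so each 64-bit half is assigned in one move.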
| 7179 | |
| 7180 | /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each |
| 7181 | /// element has been zero/sign-extended, depending on the isSigned parameter, |
| 7182 | /// from an integer type half its size. |
| 7183 | static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, |
| 7184 | bool isSigned) { |
| 7185 | // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. |
| 7186 | EVT VT = N->getValueType(0); |
| 7187 | if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { |
| 7188 | SDNode *BVN = N->getOperand(0).getNode(); |
| 7189 | if (BVN->getValueType(0) != MVT::v4i32 || |
| 7190 | BVN->getOpcode() != ISD::BUILD_VECTOR) |
| 7191 | return false; |
| 7192 | unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 7193 | unsigned HiElt = 1 - LoElt; |
| 7194 | ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); |
| 7195 | ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); |
| 7196 | ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); |
| 7197 | ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); |
| 7198 | if (!Lo0 || !Hi0 || !Lo1 || !Hi1) |
| 7199 | return false; |
| 7200 | if (isSigned) { |
| 7201 | if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && |
| 7202 | Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) |
| 7203 | return true; |
| 7204 | } else { |
| 7205 | if (Hi0->isNullValue() && Hi1->isNullValue()) |
| 7206 | return true; |
| 7207 | } |
| 7208 | return false; |
| 7209 | } |
| 7210 | |
| 7211 | if (N->getOpcode() != ISD::BUILD_VECTOR) |
| 7212 | return false; |
| 7213 | |
| 7214 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 7215 | SDNode *Elt = N->getOperand(i).getNode(); |
| 7216 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { |
| 7217 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 7218 | unsigned HalfSize = EltSize / 2; |
| 7219 | if (isSigned) { |
| 7220 | if (!isIntN(HalfSize, C->getSExtValue())) |
| 7221 | return false; |
| 7222 | } else { |
| 7223 | if (!isUIntN(HalfSize, C->getZExtValue())) |
| 7224 | return false; |
| 7225 | } |
| 7226 | continue; |
| 7227 | } |
| 7228 | return false; |
| 7229 | } |
| 7230 | |
| 7231 | return true; |
| 7232 | } |
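|      | 
|      | // For illustration: a v4i32 BUILD_VECTOR of <100, -100, 7, -7> passes only the
|      | // signed check (every element fits in i16 as a signed value), whereas
|      | // <100, 200, 60000, 7> passes only the unsigned check, since 60000 needs all
|      | // 16 bits when treated as unsigned.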
| 7233 | |
| 7234 | /// isSignExtended - Check if a node is a vector value that is sign-extended |
| 7235 | /// or a constant BUILD_VECTOR with sign-extended elements. |
| 7236 | static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { |
| 7237 | if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) |
| 7238 | return true; |
| 7239 | if (isExtendedBUILD_VECTOR(N, DAG, true)) |
| 7240 | return true; |
| 7241 | return false; |
| 7242 | } |
| 7243 | |
| 7244 | /// isZeroExtended - Check if a node is a vector value that is zero-extended |
| 7245 | /// or a constant BUILD_VECTOR with zero-extended elements. |
| 7246 | static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { |
| 7247 | if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) |
| 7248 | return true; |
| 7249 | if (isExtendedBUILD_VECTOR(N, DAG, false)) |
| 7250 | return true; |
| 7251 | return false; |
| 7252 | } |
| 7253 | |
| 7254 | static EVT getExtensionTo64Bits(const EVT &OrigVT) { |
| 7255 | if (OrigVT.getSizeInBits() >= 64) |
| 7256 | return OrigVT; |
| 7257 | |
| 7258 |   assert(OrigVT.isSimple() && "Expecting a simple value type");
| 7259 | |
| 7260 | MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; |
| 7261 | switch (OrigSimpleTy) { |
| 7262 |   default: llvm_unreachable("Unexpected Vector Type");
| 7263 | case MVT::v2i8: |
| 7264 | case MVT::v2i16: |
| 7265 | return MVT::v2i32; |
| 7266 | case MVT::v4i8: |
| 7267 | return MVT::v4i16; |
| 7268 | } |
| 7269 | } |
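|      | 
|      | // For illustration: v4i8 (32 bits total) widens to v4i16 and v2i16 widens to
|      | // v2i32; the element count is preserved while the total size reaches the
|      | // 64 bits required of a D register operand.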
| 7270 | |
| 7271 | /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total |
| 7272 | /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. |
| 7273 | /// We insert the required extension here to get the vector to fill a D register. |
| 7274 | static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, |
| 7275 | const EVT &OrigTy, |
| 7276 | const EVT &ExtTy, |
| 7277 | unsigned ExtOpcode) { |
| 7278 | // The vector originally had a size of OrigTy. It was then extended to ExtTy. |
| 7279 | // We expect the ExtTy to be 128-bits total. If the OrigTy is less than |
| 7280 | // 64-bits we need to insert a new extension so that it will be 64-bits. |
| 7281 |   assert(ExtTy.is128BitVector() && "Unexpected extension size");
| 7282 | if (OrigTy.getSizeInBits() >= 64) |
| 7283 | return N; |
| 7284 | |
| 7285 | // Must extend size to at least 64 bits to be used as an operand for VMULL. |
| 7286 | EVT NewVT = getExtensionTo64Bits(OrigTy); |
| 7287 | |
| 7288 | return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); |
| 7289 | } |
| 7290 | |
| 7291 | /// SkipLoadExtensionForVMULL - return a load of the original vector size that |
| 7292 | /// does not do any sign/zero extension. If the original vector is less |
| 7293 | /// than 64 bits, an appropriate extension will be added after the load to |
| 7294 | /// reach a total size of 64 bits. We have to add the extension separately |
| 7295 | /// because ARM does not have a sign/zero extending load for vectors. |
| 7296 | static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { |
| 7297 | EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT()); |
| 7298 | |
| 7299 | // The load already has the right type. |
| 7300 | if (ExtendedTy == LD->getMemoryVT()) |
| 7301 | return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(), |
| 7302 | LD->getBasePtr(), LD->getPointerInfo(), |
| 7303 | LD->getAlignment(), LD->getMemOperand()->getFlags()); |
| 7304 | |
| 7305 | // We need to create a zextload/sextload. We cannot just create a load |
| 7306 |   // followed by a sext/zext node because LowerMUL is also run during normal
| 7307 | // operation legalization where we can't create illegal types. |
| 7308 | return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy, |
| 7309 | LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), |
| 7310 | LD->getMemoryVT(), LD->getAlignment(), |
| 7311 | LD->getMemOperand()->getFlags()); |
| 7312 | } |
| 7313 | |
| 7314 | /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, |
| 7315 | /// extending load, or BUILD_VECTOR with extended elements, return the |
| 7316 | /// unextended value. The unextended vector should be 64 bits so that it can |
| 7317 | /// be used as an operand to a VMULL instruction. If the original vector size |
| 7318 | /// before extension is less than 64 bits we add an extension to resize
| 7319 | /// the vector to 64 bits. |
| 7320 | static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { |
| 7321 | if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) |
| 7322 | return AddRequiredExtensionForVMULL(N->getOperand(0), DAG, |
| 7323 | N->getOperand(0)->getValueType(0), |
| 7324 | N->getValueType(0), |
| 7325 | N->getOpcode()); |
| 7326 | |
| 7327 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
| 7328 | assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) && |
| 7329 |            "Expected extending load");
| 7330 | |
| 7331 | SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG); |
| 7332 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1)); |
| 7333 | unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
| 7334 | SDValue extLoad = |
| 7335 | DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad); |
| 7336 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad); |
| 7337 | |
| 7338 | return newLoad; |
| 7339 | } |
| 7340 | |
| 7341 | // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will |
| 7342 | // have been legalized as a BITCAST from v4i32. |
| 7343 | if (N->getOpcode() == ISD::BITCAST) { |
| 7344 | SDNode *BVN = N->getOperand(0).getNode(); |
| 7345 | assert(BVN->getOpcode() == ISD::BUILD_VECTOR && |
| 7346 |            BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
| 7347 | unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 7348 | return DAG.getBuildVector( |
| 7349 | MVT::v2i32, SDLoc(N), |
| 7350 | {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)}); |
| 7351 | } |
| 7352 | // Construct a new BUILD_VECTOR with elements truncated to half the size. |
| 7353 | assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR" ); |
| 7354 | EVT VT = N->getValueType(0); |
| 7355 | unsigned EltSize = VT.getScalarSizeInBits() / 2; |
| 7356 | unsigned NumElts = VT.getVectorNumElements(); |
| 7357 | MVT TruncVT = MVT::getIntegerVT(EltSize); |
| 7358 | SmallVector<SDValue, 8> Ops; |
| 7359 | SDLoc dl(N); |
| 7360 | for (unsigned i = 0; i != NumElts; ++i) { |
| 7361 | ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); |
| 7362 | const APInt &CInt = C->getAPIntValue(); |
| 7363 | // Element types smaller than 32 bits are not legal, so use i32 elements. |
| 7364 | // The values are implicitly truncated so sext vs. zext doesn't matter. |
| 7365 | Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32)); |
| 7366 | } |
| 7367 | return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops); |
| 7368 | } |
| 7369 | |
| 7370 | static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { |
| 7371 | unsigned Opcode = N->getOpcode(); |
| 7372 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 7373 | SDNode *N0 = N->getOperand(0).getNode(); |
| 7374 | SDNode *N1 = N->getOperand(1).getNode(); |
| 7375 | return N0->hasOneUse() && N1->hasOneUse() && |
| 7376 | isSignExtended(N0, DAG) && isSignExtended(N1, DAG); |
| 7377 | } |
| 7378 | return false; |
| 7379 | } |
| 7380 | |
| 7381 | static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { |
| 7382 | unsigned Opcode = N->getOpcode(); |
| 7383 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 7384 | SDNode *N0 = N->getOperand(0).getNode(); |
| 7385 | SDNode *N1 = N->getOperand(1).getNode(); |
| 7386 | return N0->hasOneUse() && N1->hasOneUse() && |
| 7387 | isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); |
| 7388 | } |
| 7389 | return false; |
| 7390 | } |
| 7391 | |
| 7392 | static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { |
| 7393 | // Multiplications are only custom-lowered for 128-bit vectors so that |
| 7394 | // VMULL can be detected. Otherwise v2i64 multiplications are not legal. |
| 7395 | EVT VT = Op.getValueType(); |
| 7396 | assert(VT.is128BitVector() && VT.isInteger() && |
| 7397 | "unexpected type for custom-lowering ISD::MUL" ); |
| 7398 | SDNode *N0 = Op.getOperand(0).getNode(); |
| 7399 | SDNode *N1 = Op.getOperand(1).getNode(); |
| 7400 | unsigned NewOpc = 0; |
| 7401 | bool isMLA = false; |
| 7402 | bool isN0SExt = isSignExtended(N0, DAG); |
| 7403 | bool isN1SExt = isSignExtended(N1, DAG); |
| 7404 | if (isN0SExt && isN1SExt) |
| 7405 | NewOpc = ARMISD::VMULLs; |
| 7406 | else { |
| 7407 | bool isN0ZExt = isZeroExtended(N0, DAG); |
| 7408 | bool isN1ZExt = isZeroExtended(N1, DAG); |
| 7409 | if (isN0ZExt && isN1ZExt) |
| 7410 | NewOpc = ARMISD::VMULLu; |
| 7411 | else if (isN1SExt || isN1ZExt) { |
| 7412 | // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these |
| 7413 | // into (s/zext A * s/zext C) + (s/zext B * s/zext C) |
| 7414 | if (isN1SExt && isAddSubSExt(N0, DAG)) { |
| 7415 | NewOpc = ARMISD::VMULLs; |
| 7416 | isMLA = true; |
| 7417 | } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { |
| 7418 | NewOpc = ARMISD::VMULLu; |
| 7419 | isMLA = true; |
| 7420 | } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { |
| 7421 | std::swap(N0, N1); |
| 7422 | NewOpc = ARMISD::VMULLu; |
| 7423 | isMLA = true; |
| 7424 | } |
| 7425 | } |
| 7426 | |
| 7427 | if (!NewOpc) { |
| 7428 | if (VT == MVT::v2i64) |
| 7429 | // Fall through to expand this. It is not legal. |
| 7430 | return SDValue(); |
| 7431 | else |
| 7432 | // Other vector multiplications are legal. |
| 7433 | return Op; |
| 7434 | } |
| 7435 | } |
| 7436 | |
| 7437 | // Legalize to a VMULL instruction. |
| 7438 | SDLoc DL(Op); |
| 7439 | SDValue Op0; |
| 7440 | SDValue Op1 = SkipExtensionForVMULL(N1, DAG); |
| 7441 | if (!isMLA) { |
| 7442 | Op0 = SkipExtensionForVMULL(N0, DAG); |
| 7443 | assert(Op0.getValueType().is64BitVector() && |
| 7444 | Op1.getValueType().is64BitVector() && |
| 7445 | "unexpected types for extended operands to VMULL" ); |
| 7446 | return DAG.getNode(NewOpc, DL, VT, Op0, Op1); |
| 7447 | } |
| 7448 | |
  // Optimize (zext A + zext B) * C into (VMULL A, C) + (VMULL B, C) during
  // isel lowering to take advantage of no-stall back-to-back vmul + vmla.
| 7451 | // vmull q0, d4, d6 |
| 7452 | // vmlal q0, d5, d6 |
| 7453 | // is faster than |
| 7454 | // vaddl q0, d4, d5 |
| 7455 | // vmovl q1, d6 |
| 7456 | // vmul q0, q0, q1 |
| 7457 | SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); |
| 7458 | SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); |
| 7459 | EVT Op1VT = Op1.getValueType(); |
| 7460 | return DAG.getNode(N0->getOpcode(), DL, VT, |
| 7461 | DAG.getNode(NewOpc, DL, VT, |
| 7462 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), |
| 7463 | DAG.getNode(NewOpc, DL, VT, |
| 7464 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); |
| 7465 | } |
| 7466 | |
| 7467 | static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, |
| 7468 | SelectionDAG &DAG) { |
| 7469 | // TODO: Should this propagate fast-math-flags? |
| 7470 | |
| 7471 | // Convert to float |
| 7472 | // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); |
| 7473 | // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); |
| 7474 | X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); |
| 7475 | Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); |
| 7476 | X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); |
| 7477 | Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); |
| 7478 | // Get reciprocal estimate. |
| 7479 | // float4 recip = vrecpeq_f32(yf); |
| 7480 | Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 7481 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), |
| 7482 | Y); |
  // Because char has a smaller range than uchar, we can actually get away
  // without any Newton steps. This requires that we use a weird bias
  // of 0xb000, however (again, this has been exhaustively tested).
| 7486 | // float4 result = as_float4(as_int4(xf*recip) + 0xb000); |
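  // Adding 0xb000 to the integer representation of the product bumps its
  // magnitude by a fixed number of ulps, compensating for the reciprocal
  // estimate being slightly low, so the truncating conversion below lands on
  // the exact quotient.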
| 7487 | X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); |
| 7488 | X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); |
| 7489 | Y = DAG.getConstant(0xb000, dl, MVT::v4i32); |
| 7490 | X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); |
| 7491 | X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); |
| 7492 | // Convert back to short. |
| 7493 | X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); |
| 7494 | X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); |
| 7495 | return X; |
| 7496 | } |
| 7497 | |
| 7498 | static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, |
| 7499 | SelectionDAG &DAG) { |
| 7500 | // TODO: Should this propagate fast-math-flags? |
| 7501 | |
| 7502 | SDValue N2; |
| 7503 | // Convert to float. |
| 7504 | // float4 yf = vcvt_f32_s32(vmovl_s16(y)); |
| 7505 | // float4 xf = vcvt_f32_s32(vmovl_s16(x)); |
| 7506 | N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); |
| 7507 | N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); |
| 7508 | N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); |
| 7509 | N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); |
| 7510 | |
| 7511 | // Use reciprocal estimate and one refinement step. |
| 7512 | // float4 recip = vrecpeq_f32(yf); |
| 7513 | // recip *= vrecpsq_f32(yf, recip); |
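  // Each VRECPS application is one Newton-Raphson step: vrecps(y, r) computes
  // (2 - y*r), so the refined estimate is r' = r * (2 - y*r), roughly doubling
  // the number of correct bits per step.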
| 7514 | N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 7515 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), |
| 7516 | N1); |
| 7517 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 7518 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), |
| 7519 | N1, N2); |
| 7520 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); |
  // Because short has a smaller range than ushort, we can actually get away
  // with only a single Newton step. This requires that we use a weird bias
  // of 0x89, however (again, this has been exhaustively tested).
| 7524 | // float4 result = as_float4(as_int4(xf*recip) + 0x89); |
| 7525 | N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); |
| 7526 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); |
| 7527 | N1 = DAG.getConstant(0x89, dl, MVT::v4i32); |
| 7528 | N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); |
| 7529 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); |
| 7530 | // Convert back to integer and return. |
| 7531 | // return vmovn_s32(vcvt_s32_f32(result)); |
| 7532 | N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); |
| 7533 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); |
| 7534 | return N0; |
| 7535 | } |
| 7536 | |
| 7537 | static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { |
| 7538 | EVT VT = Op.getValueType(); |
| 7539 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 7540 | "unexpected type for custom-lowering ISD::SDIV" ); |
| 7541 | |
| 7542 | SDLoc dl(Op); |
| 7543 | SDValue N0 = Op.getOperand(0); |
| 7544 | SDValue N1 = Op.getOperand(1); |
| 7545 | SDValue N2, N3; |
| 7546 | |
| 7547 | if (VT == MVT::v8i8) { |
| 7548 | N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); |
| 7549 | N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); |
| 7550 | |
| 7551 | N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
| 7552 | DAG.getIntPtrConstant(4, dl)); |
| 7553 | N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
| 7554 | DAG.getIntPtrConstant(4, dl)); |
| 7555 | N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
| 7556 | DAG.getIntPtrConstant(0, dl)); |
| 7557 | N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
| 7558 | DAG.getIntPtrConstant(0, dl)); |
| 7559 | |
| 7560 | N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 |
| 7561 | N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 |
| 7562 | |
| 7563 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); |
| 7564 | N0 = LowerCONCAT_VECTORS(N0, DAG); |
| 7565 | |
| 7566 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); |
| 7567 | return N0; |
| 7568 | } |
| 7569 | return LowerSDIV_v4i16(N0, N1, dl, DAG); |
| 7570 | } |
| 7571 | |
| 7572 | static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { |
| 7573 | // TODO: Should this propagate fast-math-flags? |
| 7574 | EVT VT = Op.getValueType(); |
| 7575 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 7576 | "unexpected type for custom-lowering ISD::UDIV" ); |
| 7577 | |
| 7578 | SDLoc dl(Op); |
| 7579 | SDValue N0 = Op.getOperand(0); |
| 7580 | SDValue N1 = Op.getOperand(1); |
| 7581 | SDValue N2, N3; |
| 7582 | |
| 7583 | if (VT == MVT::v8i8) { |
| 7584 | N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); |
| 7585 | N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); |
| 7586 | |
| 7587 | N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
| 7588 | DAG.getIntPtrConstant(4, dl)); |
| 7589 | N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
| 7590 | DAG.getIntPtrConstant(4, dl)); |
| 7591 | N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
| 7592 | DAG.getIntPtrConstant(0, dl)); |
| 7593 | N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
| 7594 | DAG.getIntPtrConstant(0, dl)); |
| 7595 | |
| 7596 | N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 |
| 7597 | N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 |
| 7598 | |
| 7599 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); |
| 7600 | N0 = LowerCONCAT_VECTORS(N0, DAG); |
| 7601 | |
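    // Narrow the eight i16 quotients back to v8i8 with a saturating
    // signed-to-unsigned move (VQMOVUN).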
| 7602 | N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, |
| 7603 | DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl, |
| 7604 | MVT::i32), |
| 7605 | N0); |
| 7606 | return N0; |
| 7607 | } |
| 7608 | |
  // v4i16 udiv ... Convert to float.
| 7610 | // float4 yf = vcvt_f32_s32(vmovl_u16(y)); |
| 7611 | // float4 xf = vcvt_f32_s32(vmovl_u16(x)); |
| 7612 | N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); |
| 7613 | N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); |
| 7614 | N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); |
| 7615 | SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); |
| 7616 | |
| 7617 | // Use reciprocal estimate and two refinement steps. |
| 7618 | // float4 recip = vrecpeq_f32(yf); |
| 7619 | // recip *= vrecpsq_f32(yf, recip); |
| 7620 | // recip *= vrecpsq_f32(yf, recip); |
| 7621 | N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 7622 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), |
| 7623 | BN1); |
| 7624 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 7625 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), |
| 7626 | BN1, N2); |
| 7627 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); |
| 7628 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 7629 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), |
| 7630 | BN1, N2); |
| 7631 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); |
| 7632 | // Simply multiplying by the reciprocal estimate can leave us a few ulps |
| 7633 | // too low, so we add 2 ulps (exhaustive testing shows that this is enough, |
| 7634 | // and that it will never cause us to return an answer too large). |
| 7635 | // float4 result = as_float4(as_int4(xf*recip) + 2); |
| 7636 | N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); |
| 7637 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); |
| 7638 | N1 = DAG.getConstant(2, dl, MVT::v4i32); |
| 7639 | N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); |
| 7640 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); |
| 7641 | // Convert back to integer and return. |
| 7642 | // return vmovn_u32(vcvt_s32_f32(result)); |
| 7643 | N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); |
| 7644 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); |
| 7645 | return N0; |
| 7646 | } |
| 7647 | |
| 7648 | static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) { |
| 7649 | SDNode *N = Op.getNode(); |
| 7650 | EVT VT = N->getValueType(0); |
| 7651 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
| 7652 | |
| 7653 | SDValue Carry = Op.getOperand(2); |
| 7654 | |
| 7655 | SDLoc DL(Op); |
| 7656 | |
| 7657 | SDValue Result; |
| 7658 | if (Op.getOpcode() == ISD::ADDCARRY) { |
| 7659 | // This converts the boolean value carry into the carry flag. |
| 7660 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); |
| 7661 | |
| 7662 | // Do the addition proper using the carry flag we wanted. |
| 7663 | Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0), |
| 7664 | Op.getOperand(1), Carry); |
| 7665 | |
| 7666 | // Now convert the carry flag into a boolean value. |
| 7667 | Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); |
| 7668 | } else { |
    // ARMISD::SUBE expects a carry, not a borrow as ISD::SUBCARRY provides,
    // so we have to invert the carry first.
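    // (On ARM, C=1 after a subtraction means "no borrow": SBC computes
    // a - b - !C. So an incoming borrow bit B becomes the carry 1 - B.)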
| 7671 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, |
| 7672 | DAG.getConstant(1, DL, MVT::i32), Carry); |
| 7673 | // This converts the boolean value carry into the carry flag. |
| 7674 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); |
| 7675 | |
| 7676 | // Do the subtraction proper using the carry flag we wanted. |
| 7677 | Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0), |
| 7678 | Op.getOperand(1), Carry); |
| 7679 | |
| 7680 | // Now convert the carry flag into a boolean value. |
| 7681 | Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); |
| 7682 | // But the carry returned by ARMISD::SUBE is not a borrow as expected |
| 7683 | // by ISD::SUBCARRY, so compute 1 - C. |
| 7684 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, |
| 7685 | DAG.getConstant(1, DL, MVT::i32), Carry); |
| 7686 | } |
| 7687 | |
| 7688 | // Return both values. |
| 7689 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry); |
| 7690 | } |
| 7691 | |
| 7692 | SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { |
| 7693 | assert(Subtarget->isTargetDarwin()); |
| 7694 | |
  // For iOS, we want to call an alternative entry point: __sincos_stret,
  // whose return values are passed via sret.
| 7697 | SDLoc dl(Op); |
| 7698 | SDValue Arg = Op.getOperand(0); |
| 7699 | EVT ArgVT = Arg.getValueType(); |
| 7700 | Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); |
| 7701 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
| 7702 | |
| 7703 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 7704 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 7705 | |
| 7706 | // Pair of floats / doubles used to pass the result. |
| 7707 | Type *RetTy = StructType::get(ArgTy, ArgTy); |
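  // Under the sret lowering below, the call is effectively (a sketch):
  //   struct { T sinval; T cosval; } ret; __sincos_stret(&ret, x);
  // with the result slot passed as a hidden first argument.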
| 7708 | auto &DL = DAG.getDataLayout(); |
| 7709 | |
| 7710 | ArgListTy Args; |
| 7711 | bool ShouldUseSRet = Subtarget->isAPCS_ABI(); |
| 7712 | SDValue SRet; |
| 7713 | if (ShouldUseSRet) { |
| 7714 | // Create stack object for sret. |
| 7715 | const uint64_t ByteSize = DL.getTypeAllocSize(RetTy); |
| 7716 | const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy); |
| 7717 | int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); |
| 7718 | SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL)); |
| 7719 | |
| 7720 | ArgListEntry Entry; |
| 7721 | Entry.Node = SRet; |
| 7722 | Entry.Ty = RetTy->getPointerTo(); |
| 7723 | Entry.IsSExt = false; |
| 7724 | Entry.IsZExt = false; |
| 7725 | Entry.IsSRet = true; |
| 7726 | Args.push_back(Entry); |
| 7727 | RetTy = Type::getVoidTy(*DAG.getContext()); |
| 7728 | } |
| 7729 | |
| 7730 | ArgListEntry Entry; |
| 7731 | Entry.Node = Arg; |
| 7732 | Entry.Ty = ArgTy; |
| 7733 | Entry.IsSExt = false; |
| 7734 | Entry.IsZExt = false; |
| 7735 | Args.push_back(Entry); |
| 7736 | |
| 7737 | RTLIB::Libcall LC = |
| 7738 | (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; |
| 7739 | const char *LibcallName = getLibcallName(LC); |
| 7740 | CallingConv::ID CC = getLibcallCallingConv(LC); |
| 7741 | SDValue Callee = DAG.getExternalFunctionSymbol(LibcallName); |
| 7742 | |
| 7743 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 7744 | CLI.setDebugLoc(dl) |
| 7745 | .setChain(DAG.getEntryNode()) |
| 7746 | .setCallee(CC, RetTy, Callee, std::move(Args)) |
| 7747 | .setDiscardResult(ShouldUseSRet); |
| 7748 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 7749 | |
| 7750 | if (!ShouldUseSRet) |
| 7751 | return CallResult.first; |
| 7752 | |
| 7753 | SDValue LoadSin = |
| 7754 | DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo()); |
| 7755 | |
| 7756 | // Address of cos field. |
| 7757 | SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet, |
| 7758 | DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl)); |
| 7759 | SDValue LoadCos = |
| 7760 | DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo()); |
| 7761 | |
| 7762 | SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); |
| 7763 | return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, |
| 7764 | LoadSin.getValue(0), LoadCos.getValue(0)); |
| 7765 | } |
| 7766 | |
| 7767 | SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, |
| 7768 | bool Signed, |
| 7769 | SDValue &Chain) const { |
| 7770 | EVT VT = Op.getValueType(); |
| 7771 | assert((VT == MVT::i32 || VT == MVT::i64) && |
| 7772 | "unexpected type for custom lowering DIV" ); |
| 7773 | SDLoc dl(Op); |
| 7774 | |
| 7775 | const char *Name = nullptr; |
| 7776 | if (Signed) |
| 7777 | Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64" ; |
| 7778 | else |
| 7779 | Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64" ; |
| 7780 | |
| 7781 | SDValue ES = DAG.getExternalFunctionSymbol(Name); |
| 7782 | |
| 7783 | ARMTargetLowering::ArgListTy Args; |
| 7784 | |
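  // The Windows runtime division helpers take the divisor first, which is why
  // operand 1 below is pushed before operand 0; the effective prototype is
  // (a sketch): int __rt_sdiv(int divisor, int dividend);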
| 7785 | for (auto AI : {1, 0}) { |
| 7786 | ArgListEntry Arg; |
| 7787 | Arg.Node = Op.getOperand(AI); |
| 7788 | Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext()); |
| 7789 | Args.push_back(Arg); |
| 7790 | } |
| 7791 | |
| 7792 | CallLoweringInfo CLI(DAG); |
| 7793 | CLI.setDebugLoc(dl) |
| 7794 | .setChain(Chain) |
| 7795 | .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()), |
| 7796 | ES, std::move(Args)); |
| 7797 | |
| 7798 | return LowerCallTo(CLI).first; |
| 7799 | } |
| 7800 | |
// This is a code size optimisation: return the original SDIV node to
// DAGCombiner when we don't want to expand SDIV into a sequence of
// instructions, and an empty SDValue otherwise, which will cause the
// SDIV to be expanded in DAGCombine.
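// For example, under minsize on a Thumb2 target with hardware divide,
// something like 'x / 16' can stay as 'movs rN, #16; sdiv' (6 bytes) rather
// than the longer shift/add expansion DAGCombine would otherwise emit (a
// rough sketch; the exact sequences depend on the subtarget).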
| 7805 | SDValue |
| 7806 | ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, |
| 7807 | SelectionDAG &DAG, |
| 7808 | SmallVectorImpl<SDNode *> &Created) const { |
| 7809 | // TODO: Support SREM |
| 7810 | if (N->getOpcode() != ISD::SDIV) |
| 7811 | return SDValue(); |
| 7812 | |
| 7813 | const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget()); |
| 7814 | const bool MinSize = ST.hasMinSize(); |
| 7815 | const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode() |
| 7816 | : ST.hasDivideInARMMode(); |
| 7817 | |
| 7818 | // Don't touch vector types; rewriting this may lead to scalarizing |
| 7819 | // the int divs. |
| 7820 | if (N->getOperand(0).getValueType().isVector()) |
| 7821 | return SDValue(); |
| 7822 | |
  // Bail if MinSize is not set; in both ARM and Thumb modes we also need
  // hwdiv support for this to be really profitable.
| 7825 | if (!(MinSize && HasDivide)) |
| 7826 | return SDValue(); |
| 7827 | |
| 7828 | // ARM mode is a bit simpler than Thumb: we can handle large power |
| 7829 | // of 2 immediates with 1 mov instruction; no further checks required, |
| 7830 | // just return the sdiv node. |
| 7831 | if (!ST.isThumb()) |
| 7832 | return SDValue(N, 0); |
| 7833 | |
  // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV, and
  // thus lose the code size benefits of a MOVS that requires only 2 bytes.
| 7836 | // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here, |
| 7837 | // but as it's doing exactly this, it's not worth the trouble to get TTI. |
| 7838 | if (Divisor.sgt(128)) |
| 7839 | return SDValue(); |
| 7840 | |
| 7841 | return SDValue(N, 0); |
| 7842 | } |
| 7843 | |
| 7844 | SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, |
| 7845 | bool Signed) const { |
| 7846 | assert(Op.getValueType() == MVT::i32 && |
| 7847 | "unexpected type for custom lowering DIV" ); |
| 7848 | SDLoc dl(Op); |
| 7849 | |
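  // Chain a divide-by-zero check of the divisor ahead of the call:
  // WIN__DBZCHK traps when its operand is zero, matching the Windows
  // requirement that integer division fault on a zero divisor.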
| 7850 | SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, |
| 7851 | DAG.getEntryNode(), Op.getOperand(1)); |
| 7852 | |
| 7853 | return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); |
| 7854 | } |
| 7855 | |
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N,
                                      SDValue InChain) {
| 7857 | SDLoc DL(N); |
| 7858 | SDValue Op = N->getOperand(1); |
| 7859 | if (N->getValueType(0) == MVT::i32) |
| 7860 | return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op); |
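  // For i64, OR the two halves together: (Lo | Hi) is zero exactly when the
  // full 64-bit divisor is zero, so a single 32-bit check suffices.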
| 7861 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, |
| 7862 | DAG.getConstant(0, DL, MVT::i32)); |
| 7863 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, |
| 7864 | DAG.getConstant(1, DL, MVT::i32)); |
| 7865 | return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, |
| 7866 | DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi)); |
| 7867 | } |
| 7868 | |
| 7869 | void ARMTargetLowering::ExpandDIV_Windows( |
| 7870 | SDValue Op, SelectionDAG &DAG, bool Signed, |
| 7871 | SmallVectorImpl<SDValue> &Results) const { |
| 7872 | const auto &DL = DAG.getDataLayout(); |
| 7873 | const auto &TLI = DAG.getTargetLoweringInfo(); |
| 7874 | |
| 7875 | assert(Op.getValueType() == MVT::i64 && |
| 7876 | "unexpected type for custom lowering DIV" ); |
| 7877 | SDLoc dl(Op); |
| 7878 | |
  SDValue DBZCHK =
      WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());
| 7880 | |
| 7881 | SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); |
| 7882 | |
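  // The helper returns the quotient as a single i64; split it into the two
  // i32 halves the type legalizer expects for the illegal i64 result.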
| 7883 | SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result); |
| 7884 | SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result, |
| 7885 | DAG.getConstant(32, dl, TLI.getPointerTy(DL))); |
| 7886 | Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper); |
| 7887 | |
| 7888 | Results.push_back(Lower); |
| 7889 | Results.push_back(Upper); |
| 7890 | } |
| 7891 | |
| 7892 | static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { |
| 7893 | if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering())) |
| 7894 | // Acquire/Release load/store is not legal for targets without a dmb or |
| 7895 | // equivalent available. |
| 7896 | return SDValue(); |
| 7897 | |
| 7898 | // Monotonic load/store is legal for all targets. |
| 7899 | return Op; |
| 7900 | } |
| 7901 | |
| 7902 | static void ReplaceREADCYCLECOUNTER(SDNode *N, |
| 7903 | SmallVectorImpl<SDValue> &Results, |
| 7904 | SelectionDAG &DAG, |
| 7905 | const ARMSubtarget *Subtarget) { |
| 7906 | SDLoc DL(N); |
| 7907 | // Under Power Management extensions, the cycle-count is: |
| 7908 | // mrc p15, #0, <Rt>, c9, c13, #0 |
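  // The MRC result is only 32 bits wide, so it is paired with zero below
  // (BUILD_PAIR) to form the i64 value READCYCLECOUNTER is defined to return.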
| 7909 | SDValue Ops[] = { N->getOperand(0), // Chain |
| 7910 | DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32), |
| 7911 | DAG.getConstant(15, DL, MVT::i32), |
| 7912 | DAG.getConstant(0, DL, MVT::i32), |
| 7913 | DAG.getConstant(9, DL, MVT::i32), |
| 7914 | DAG.getConstant(13, DL, MVT::i32), |
| 7915 | DAG.getConstant(0, DL, MVT::i32) |
| 7916 | }; |
| 7917 | |
| 7918 | SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, |
| 7919 | DAG.getVTList(MVT::i32, MVT::Other), Ops); |
| 7920 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32, |
| 7921 | DAG.getConstant(0, DL, MVT::i32))); |
| 7922 | Results.push_back(Cycles32.getValue(1)); |
| 7923 | } |
| 7924 | |
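/// Bundle the two 32-bit halves of an i64 value into a GPRPair with a
/// REG_SEQUENCE, e.g. (a sketch):
///   REG_SEQUENCE GPRPairRegClass, lo(V), gsub_0, hi(V), gsub_1
/// so the value can feed pseudo-instructions such as CMP_SWAP_64 that expect
/// a register pair.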
| 7925 | static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { |
| 7926 | SDLoc dl(V.getNode()); |
| 7927 | SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32); |
| 7928 | SDValue VHi = DAG.getAnyExtOrTrunc( |
| 7929 | DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)), |
| 7930 | dl, MVT::i32); |
| 7931 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 7932 | if (isBigEndian) |
    std::swap(VLo, VHi);
| 7934 | SDValue RegClass = |
| 7935 | DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32); |
| 7936 | SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32); |
| 7937 | SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32); |
| 7938 | const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; |
| 7939 | return SDValue( |
| 7940 | DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); |
| 7941 | } |
| 7942 | |
| 7943 | static void ReplaceCMP_SWAP_64Results(SDNode *N, |
| 7944 | SmallVectorImpl<SDValue> & Results, |
| 7945 | SelectionDAG &DAG) { |
| 7946 | assert(N->getValueType(0) == MVT::i64 && |
| 7947 | "AtomicCmpSwap on types less than 64 should be legal" ); |
| 7948 | SDValue Ops[] = {N->getOperand(1), |
| 7949 | createGPRPairNode(DAG, N->getOperand(2)), |
| 7950 | createGPRPairNode(DAG, N->getOperand(3)), |
| 7951 | N->getOperand(0)}; |
| 7952 | SDNode *CmpSwap = DAG.getMachineNode( |
| 7953 | ARM::CMP_SWAP_64, SDLoc(N), |
| 7954 | DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops); |
| 7955 | |
| 7956 | MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand(); |
| 7957 | DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp}); |
| 7958 | |
| 7959 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 7960 | |
| 7961 | Results.push_back( |
| 7962 | DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0, |
| 7963 | SDLoc(N), MVT::i32, SDValue(CmpSwap, 0))); |
| 7964 | Results.push_back( |
| 7965 | DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1, |
| 7966 | SDLoc(N), MVT::i32, SDValue(CmpSwap, 0))); |
| 7967 | Results.push_back(SDValue(CmpSwap, 2)); |
| 7968 | } |
| 7969 | |
| 7970 | static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget, |
| 7971 | SelectionDAG &DAG) { |
| 7972 | const auto &TLI = DAG.getTargetLoweringInfo(); |
| 7973 | |
| 7974 | assert(Subtarget.getTargetTriple().isOSMSVCRT() && |
| 7975 | "Custom lowering is MSVCRT specific!" ); |
| 7976 | |
| 7977 | SDLoc dl(Op); |
| 7978 | SDValue Val = Op.getOperand(0); |
| 7979 | MVT Ty = Val->getSimpleValueType(0); |
| 7980 | SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, dl, Ty, Op.getOperand(1)); |
| 7981 | SDValue Callee = |
      DAG.getExternalFunctionSymbol(Ty == MVT::f32 ? "powf" : "pow");
| 7983 | |
| 7984 | TargetLowering::ArgListTy Args; |
| 7985 | TargetLowering::ArgListEntry Entry; |
| 7986 | |
| 7987 | Entry.Node = Val; |
| 7988 | Entry.Ty = Val.getValueType().getTypeForEVT(*DAG.getContext()); |
| 7989 | Entry.IsZExt = true; |
| 7990 | Args.push_back(Entry); |
| 7991 | |
| 7992 | Entry.Node = Exponent; |
| 7993 | Entry.Ty = Exponent.getValueType().getTypeForEVT(*DAG.getContext()); |
| 7994 | Entry.IsZExt = true; |
| 7995 | Args.push_back(Entry); |
| 7996 | |
| 7997 | Type *LCRTy = Val.getValueType().getTypeForEVT(*DAG.getContext()); |
| 7998 | |
  // The in-chain to the call is the entry node. If we are emitting a
  // tailcall, the chain will be mutated if the node has a non-entry input
  // chain.
| 8002 | SDValue InChain = DAG.getEntryNode(); |
| 8003 | SDValue TCChain = InChain; |
| 8004 | |
| 8005 | const Function &F = DAG.getMachineFunction().getFunction(); |
| 8006 | bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) && |
| 8007 | F.getReturnType() == LCRTy; |
| 8008 | if (IsTC) |
| 8009 | InChain = TCChain; |
| 8010 | |
| 8011 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 8012 | CLI.setDebugLoc(dl) |
| 8013 | .setChain(InChain) |
| 8014 | .setCallee(CallingConv::ARM_AAPCS_VFP, LCRTy, Callee, std::move(Args)) |
| 8015 | .setTailCall(IsTC); |
| 8016 | std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI); |
| 8017 | |
| 8018 | // Return the chain (the DAG root) if it is a tail call |
| 8019 | return !CI.second.getNode() ? DAG.getRoot() : CI.first; |
| 8020 | } |
| 8021 | |
| 8022 | SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
  LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
| 8024 | switch (Op.getOpcode()) { |
  default: llvm_unreachable("Don't know how to custom lower this!");
| 8026 | case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); |
| 8027 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
| 8028 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); |
| 8029 | case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); |
| 8030 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); |
| 8031 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
| 8032 | case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); |
| 8033 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
| 8034 | case ISD::BR_CC: return LowerBR_CC(Op, DAG); |
| 8035 | case ISD::BR_JT: return LowerBR_JT(Op, DAG); |
| 8036 | case ISD::VASTART: return LowerVASTART(Op, DAG); |
| 8037 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); |
| 8038 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); |
| 8039 | case ISD::SINT_TO_FP: |
| 8040 | case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); |
| 8041 | case ISD::FP_TO_SINT: |
| 8042 | case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); |
| 8043 | case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); |
| 8044 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
| 8045 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); |
| 8046 | case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); |
| 8047 | case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); |
| 8048 | case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); |
| 8049 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, |
| 8050 | Subtarget); |
| 8051 | case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG, Subtarget); |
| 8052 | case ISD::SHL: |
| 8053 | case ISD::SRL: |
| 8054 | case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); |
| 8055 | case ISD::SREM: return LowerREM(Op.getNode(), DAG); |
| 8056 | case ISD::UREM: return LowerREM(Op.getNode(), DAG); |
| 8057 | case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); |
| 8058 | case ISD::SRL_PARTS: |
| 8059 | case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); |
| 8060 | case ISD::CTTZ: |
| 8061 | case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget); |
| 8062 | case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); |
| 8063 | case ISD::SETCC: return LowerVSETCC(Op, DAG); |
| 8064 | case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG); |
| 8065 | case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); |
| 8066 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); |
| 8067 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); |
| 8068 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); |
| 8069 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); |
| 8070 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); |
| 8071 | case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); |
| 8072 | case ISD::MUL: return LowerMUL(Op, DAG); |
| 8073 | case ISD::SDIV: |
| 8074 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
| 8075 | return LowerDIV_Windows(Op, DAG, /* Signed */ true); |
| 8076 | return LowerSDIV(Op, DAG); |
| 8077 | case ISD::UDIV: |
| 8078 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
| 8079 | return LowerDIV_Windows(Op, DAG, /* Signed */ false); |
| 8080 | return LowerUDIV(Op, DAG); |
| 8081 | case ISD::ADDCARRY: |
| 8082 | case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG); |
| 8083 | case ISD::SADDO: |
| 8084 | case ISD::SSUBO: |
| 8085 | return LowerSignedALUO(Op, DAG); |
| 8086 | case ISD::UADDO: |
| 8087 | case ISD::USUBO: |
| 8088 | return LowerUnsignedALUO(Op, DAG); |
| 8089 | case ISD::ATOMIC_LOAD: |
| 8090 | case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); |
| 8091 | case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); |
| 8092 | case ISD::SDIVREM: |
| 8093 | case ISD::UDIVREM: return LowerDivRem(Op, DAG); |
| 8094 | case ISD::DYNAMIC_STACKALLOC: |
| 8095 | if (Subtarget->isTargetWindows()) |
| 8096 | return LowerDYNAMIC_STACKALLOC(Op, DAG); |
| 8097 | llvm_unreachable("Don't know how to custom lower this!" ); |
| 8098 | case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); |
| 8099 | case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); |
| 8100 | case ISD::FPOWI: return LowerFPOWI(Op, *Subtarget, DAG); |
| 8101 | case ARMISD::WIN__DBZCHK: return SDValue(); |
| 8102 | } |
| 8103 | } |
| 8104 | |
| 8105 | static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 8106 | SelectionDAG &DAG) { |
| 8107 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); |
| 8108 | unsigned Opc = 0; |
| 8109 | if (IntNo == Intrinsic::arm_smlald) |
| 8110 | Opc = ARMISD::SMLALD; |
| 8111 | else if (IntNo == Intrinsic::arm_smlaldx) |
| 8112 | Opc = ARMISD::SMLALDX; |
| 8113 | else if (IntNo == Intrinsic::arm_smlsld) |
| 8114 | Opc = ARMISD::SMLSLD; |
| 8115 | else if (IntNo == Intrinsic::arm_smlsldx) |
| 8116 | Opc = ARMISD::SMLSLDX; |
| 8117 | else |
| 8118 | return; |
| 8119 | |
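  // Operand 3 is the 64-bit accumulator; it is split into i32 halves because
  // the SMLALD/SMLSLD family of nodes takes and returns the accumulator as a
  // lo/hi pair of i32 values.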
| 8120 | SDLoc dl(N); |
| 8121 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, |
| 8122 | N->getOperand(3), |
| 8123 | DAG.getConstant(0, dl, MVT::i32)); |
| 8124 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, |
| 8125 | N->getOperand(3), |
| 8126 | DAG.getConstant(1, dl, MVT::i32)); |
| 8127 | |
| 8128 | SDValue LongMul = DAG.getNode(Opc, dl, |
| 8129 | DAG.getVTList(MVT::i32, MVT::i32), |
| 8130 | N->getOperand(1), N->getOperand(2), |
| 8131 | Lo, Hi); |
| 8132 | Results.push_back(LongMul.getValue(0)); |
| 8133 | Results.push_back(LongMul.getValue(1)); |
| 8134 | } |
| 8135 | |
| 8136 | /// ReplaceNodeResults - Replace the results of node with an illegal result |
| 8137 | /// type with new values built out of custom code. |
| 8138 | void ARMTargetLowering::ReplaceNodeResults(SDNode *N, |
| 8139 | SmallVectorImpl<SDValue> &Results, |
| 8140 | SelectionDAG &DAG) const { |
| 8141 | SDValue Res; |
| 8142 | switch (N->getOpcode()) { |
| 8143 | default: |
| 8144 | llvm_unreachable("Don't know how to custom expand this!" ); |
| 8145 | case ISD::READ_REGISTER: |
| 8146 | ExpandREAD_REGISTER(N, Results, DAG); |
| 8147 | break; |
| 8148 | case ISD::BITCAST: |
| 8149 | Res = ExpandBITCAST(N, DAG, Subtarget); |
| 8150 | break; |
| 8151 | case ISD::SRL: |
| 8152 | case ISD::SRA: |
| 8153 | Res = Expand64BitShift(N, DAG, Subtarget); |
| 8154 | break; |
| 8155 | case ISD::SREM: |
| 8156 | case ISD::UREM: |
| 8157 | Res = LowerREM(N, DAG); |
| 8158 | break; |
| 8159 | case ISD::SDIVREM: |
| 8160 | case ISD::UDIVREM: |
| 8161 | Res = LowerDivRem(SDValue(N, 0), DAG); |
    assert(Res.getNumOperands() == 2 && "DivRem needs two values");
| 8163 | Results.push_back(Res.getValue(0)); |
| 8164 | Results.push_back(Res.getValue(1)); |
| 8165 | return; |
| 8166 | case ISD::READCYCLECOUNTER: |
| 8167 | ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); |
| 8168 | return; |
| 8169 | case ISD::UDIV: |
| 8170 | case ISD::SDIV: |
| 8171 | assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows" ); |
| 8172 | return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV, |
| 8173 | Results); |
| 8174 | case ISD::ATOMIC_CMP_SWAP: |
| 8175 | ReplaceCMP_SWAP_64Results(N, Results, DAG); |
| 8176 | return; |
| 8177 | case ISD::INTRINSIC_WO_CHAIN: |
| 8178 | return ReplaceLongIntrinsic(N, Results, DAG); |
| 8179 | case ISD::ABS: |
| 8180 | lowerABS(N, Results, DAG); |
    return;
  }
| 8184 | if (Res.getNode()) |
| 8185 | Results.push_back(Res); |
| 8186 | } |
| 8187 | |
| 8188 | //===----------------------------------------------------------------------===// |
| 8189 | // ARM Scheduler Hooks |
| 8190 | //===----------------------------------------------------------------------===// |
| 8191 | |
| 8192 | /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and |
| 8193 | /// registers the function context. |
| 8194 | void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, |
| 8195 | MachineBasicBlock *MBB, |
| 8196 | MachineBasicBlock *DispatchBB, |
| 8197 | int FI) const { |
| 8198 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 8199 | "ROPI/RWPI not currently supported with SjLj" ); |
| 8200 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 8201 | DebugLoc dl = MI.getDebugLoc(); |
| 8202 | MachineFunction *MF = MBB->getParent(); |
| 8203 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 8204 | MachineConstantPool *MCP = MF->getConstantPool(); |
| 8205 | ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); |
| 8206 | const Function &F = MF->getFunction(); |
| 8207 | |
| 8208 | bool isThumb = Subtarget->isThumb(); |
| 8209 | bool isThumb2 = Subtarget->isThumb2(); |
| 8210 | |
| 8211 | unsigned PCLabelId = AFI->createPICLabelUId(); |
| 8212 | unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; |
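  // (The PC reads as the instruction address plus 8 in ARM state and plus 4
  // in Thumb state, hence the different adjustments.)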
| 8213 | ARMConstantPoolValue *CPV = |
| 8214 | ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj); |
| 8215 | unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); |
| 8216 | |
| 8217 | const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass |
| 8218 | : &ARM::GPRRegClass; |
| 8219 | |
| 8220 | // Grab constant pool and fixed stack memory operands. |
| 8221 | MachineMemOperand *CPMMO = |
| 8222 | MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), |
| 8223 | MachineMemOperand::MOLoad, 4, 4); |
| 8224 | |
| 8225 | MachineMemOperand *FIMMOSt = |
| 8226 | MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), |
| 8227 | MachineMemOperand::MOStore, 4, 4); |
| 8228 | |
| 8229 | // Load the address of the dispatch MBB into the jump buffer. |
| 8230 | if (isThumb2) { |
| 8231 | // Incoming value: jbuf |
| 8232 | // ldr.n r5, LCPI1_1 |
| 8233 | // orr r5, r5, #1 |
| 8234 | // add r5, pc |
| 8235 | // str r5, [$jbuf, #+4] ; &jbuf[1] |
| 8236 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
| 8237 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) |
| 8238 | .addConstantPoolIndex(CPI) |
| 8239 | .addMemOperand(CPMMO) |
| 8240 | .add(predOps(ARMCC::AL)); |
| 8241 | // Set the low bit because of thumb mode. |
| 8242 | unsigned NewVReg2 = MRI->createVirtualRegister(TRC); |
| 8243 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) |
| 8244 | .addReg(NewVReg1, RegState::Kill) |
| 8245 | .addImm(0x01) |
| 8246 | .add(predOps(ARMCC::AL)) |
| 8247 | .add(condCodeOp()); |
| 8248 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
| 8249 | BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) |
| 8250 | .addReg(NewVReg2, RegState::Kill) |
| 8251 | .addImm(PCLabelId); |
| 8252 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) |
| 8253 | .addReg(NewVReg3, RegState::Kill) |
| 8254 | .addFrameIndex(FI) |
| 8255 | .addImm(36) // &jbuf[1] :: pc |
| 8256 | .addMemOperand(FIMMOSt) |
| 8257 | .add(predOps(ARMCC::AL)); |
| 8258 | } else if (isThumb) { |
| 8259 | // Incoming value: jbuf |
| 8260 | // ldr.n r1, LCPI1_4 |
| 8261 | // add r1, pc |
| 8262 | // mov r2, #1 |
| 8263 | // orrs r1, r2 |
| 8264 | // add r2, $jbuf, #+4 ; &jbuf[1] |
| 8265 | // str r1, [r2] |
| 8266 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
| 8267 | BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) |
| 8268 | .addConstantPoolIndex(CPI) |
| 8269 | .addMemOperand(CPMMO) |
| 8270 | .add(predOps(ARMCC::AL)); |
| 8271 | unsigned NewVReg2 = MRI->createVirtualRegister(TRC); |
| 8272 | BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) |
| 8273 | .addReg(NewVReg1, RegState::Kill) |
| 8274 | .addImm(PCLabelId); |
| 8275 | // Set the low bit because of thumb mode. |
| 8276 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
| 8277 | BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) |
| 8278 | .addReg(ARM::CPSR, RegState::Define) |
| 8279 | .addImm(1) |
| 8280 | .add(predOps(ARMCC::AL)); |
| 8281 | unsigned NewVReg4 = MRI->createVirtualRegister(TRC); |
| 8282 | BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) |
| 8283 | .addReg(ARM::CPSR, RegState::Define) |
| 8284 | .addReg(NewVReg2, RegState::Kill) |
| 8285 | .addReg(NewVReg3, RegState::Kill) |
| 8286 | .add(predOps(ARMCC::AL)); |
| 8287 | unsigned NewVReg5 = MRI->createVirtualRegister(TRC); |
| 8288 | BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5) |
| 8289 | .addFrameIndex(FI) |
| 8290 | .addImm(36); // &jbuf[1] :: pc |
| 8291 | BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) |
| 8292 | .addReg(NewVReg4, RegState::Kill) |
| 8293 | .addReg(NewVReg5, RegState::Kill) |
| 8294 | .addImm(0) |
| 8295 | .addMemOperand(FIMMOSt) |
| 8296 | .add(predOps(ARMCC::AL)); |
| 8297 | } else { |
| 8298 | // Incoming value: jbuf |
| 8299 | // ldr r1, LCPI1_1 |
| 8300 | // add r1, pc, r1 |
| 8301 | // str r1, [$jbuf, #+4] ; &jbuf[1] |
| 8302 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
| 8303 | BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) |
| 8304 | .addConstantPoolIndex(CPI) |
| 8305 | .addImm(0) |
| 8306 | .addMemOperand(CPMMO) |
| 8307 | .add(predOps(ARMCC::AL)); |
| 8308 | unsigned NewVReg2 = MRI->createVirtualRegister(TRC); |
| 8309 | BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) |
| 8310 | .addReg(NewVReg1, RegState::Kill) |
| 8311 | .addImm(PCLabelId) |
| 8312 | .add(predOps(ARMCC::AL)); |
| 8313 | BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) |
| 8314 | .addReg(NewVReg2, RegState::Kill) |
| 8315 | .addFrameIndex(FI) |
| 8316 | .addImm(36) // &jbuf[1] :: pc |
| 8317 | .addMemOperand(FIMMOSt) |
| 8318 | .add(predOps(ARMCC::AL)); |
| 8319 | } |
| 8320 | } |
| 8321 | |
| 8322 | void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, |
| 8323 | MachineBasicBlock *MBB) const { |
| 8324 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 8325 | DebugLoc dl = MI.getDebugLoc(); |
| 8326 | MachineFunction *MF = MBB->getParent(); |
| 8327 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 8328 | MachineFrameInfo &MFI = MF->getFrameInfo(); |
| 8329 | int FI = MFI.getFunctionContextIndex(); |
| 8330 | |
| 8331 | const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass |
| 8332 | : &ARM::GPRnopcRegClass; |
| 8333 | |
| 8334 | // Get a mapping of the call site numbers to all of the landing pads they're |
| 8335 | // associated with. |
| 8336 | DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad; |
| 8337 | unsigned MaxCSNum = 0; |
| 8338 | for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; |
| 8339 | ++BB) { |
| 8340 | if (!BB->isEHPad()) continue; |
| 8341 | |
| 8342 | // FIXME: We should assert that the EH_LABEL is the first MI in the landing |
| 8343 | // pad. |
| 8344 | for (MachineBasicBlock::iterator |
| 8345 | II = BB->begin(), IE = BB->end(); II != IE; ++II) { |
| 8346 | if (!II->isEHLabel()) continue; |
| 8347 | |
| 8348 | MCSymbol *Sym = II->getOperand(0).getMCSymbol(); |
| 8349 | if (!MF->hasCallSiteLandingPad(Sym)) continue; |
| 8350 | |
| 8351 | SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym); |
| 8352 | for (SmallVectorImpl<unsigned>::iterator |
| 8353 | CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); |
| 8354 | CSI != CSE; ++CSI) { |
| 8355 | CallSiteNumToLPad[*CSI].push_back(&*BB); |
| 8356 | MaxCSNum = std::max(MaxCSNum, *CSI); |
| 8357 | } |
| 8358 | break; |
| 8359 | } |
| 8360 | } |
| 8361 | |
| 8362 | // Get an ordered list of the machine basic blocks for the jump table. |
| 8363 | std::vector<MachineBasicBlock*> LPadList; |
| 8364 | SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; |
| 8365 | LPadList.reserve(CallSiteNumToLPad.size()); |
| 8366 | for (unsigned I = 1; I <= MaxCSNum; ++I) { |
| 8367 | SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; |
| 8368 | for (SmallVectorImpl<MachineBasicBlock*>::iterator |
| 8369 | II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { |
| 8370 | LPadList.push_back(*II); |
| 8371 | InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); |
| 8372 | } |
| 8373 | } |
| 8374 | |
| 8375 | assert(!LPadList.empty() && |
| 8376 | "No landing pad destinations for the dispatch jump table!" ); |
| 8377 | |
| 8378 | // Create the jump table and associated information. |
| 8379 | MachineJumpTableInfo *JTI = |
| 8380 | MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); |
| 8381 | unsigned MJTI = JTI->createJumpTableIndex(LPadList); |
| 8382 | |
| 8383 | // Create the MBBs for the dispatch code. |
| 8384 | |
| 8385 | // Shove the dispatch's address into the return slot in the function context. |
| 8386 | MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); |
| 8387 | DispatchBB->setIsEHPad(); |
| 8388 | |
| 8389 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 8390 | unsigned trap_opcode; |
| 8391 | if (Subtarget->isThumb()) |
| 8392 | trap_opcode = ARM::tTRAP; |
| 8393 | else |
| 8394 | trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; |
| 8395 | |
| 8396 | BuildMI(TrapBB, dl, TII->get(trap_opcode)); |
| 8397 | DispatchBB->addSuccessor(TrapBB); |
| 8398 | |
| 8399 | MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); |
| 8400 | DispatchBB->addSuccessor(DispContBB); |
| 8401 | |
  // Insert the MBBs.
| 8403 | MF->insert(MF->end(), DispatchBB); |
| 8404 | MF->insert(MF->end(), DispContBB); |
| 8405 | MF->insert(MF->end(), TrapBB); |
| 8406 | |
| 8407 | // Insert code into the entry block that creates and registers the function |
| 8408 | // context. |
| 8409 | SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); |
| 8410 | |
| 8411 | MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( |
| 8412 | MachinePointerInfo::getFixedStack(*MF, FI), |
| 8413 | MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4); |
| 8414 | |
| 8415 | MachineInstrBuilder MIB; |
| 8416 | MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); |
| 8417 | |
| 8418 | const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); |
| 8419 | const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); |
| 8420 | |
| 8421 | // Add a register mask with no preserved registers. This results in all |
| 8422 | // registers being marked as clobbered. This can't work if the dispatch block |
| 8423 | // is in a Thumb1 function and is linked with ARM code which uses the FP |
| 8424 | // registers, as there is no way to preserve the FP registers in Thumb1 mode. |
| 8425 | MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF)); |
| 8426 | |
| 8427 | bool IsPositionIndependent = isPositionIndependent(); |
| 8428 | unsigned NumLPads = LPadList.size(); |
| 8429 | if (Subtarget->isThumb2()) { |
| 8430 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
| 8431 | BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) |
| 8432 | .addFrameIndex(FI) |
| 8433 | .addImm(4) |
| 8434 | .addMemOperand(FIMMOLd) |
| 8435 | .add(predOps(ARMCC::AL)); |
| 8436 | |
| 8437 | if (NumLPads < 256) { |
| 8438 | BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) |
| 8439 | .addReg(NewVReg1) |
| 8440 | .addImm(LPadList.size()) |
| 8441 | .add(predOps(ARMCC::AL)); |
| 8442 | } else { |
| 8443 | unsigned VReg1 = MRI->createVirtualRegister(TRC); |
| 8444 | BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) |
| 8445 | .addImm(NumLPads & 0xFFFF) |
| 8446 | .add(predOps(ARMCC::AL)); |
| 8447 | |
| 8448 | unsigned VReg2 = VReg1; |
| 8449 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 8450 | VReg2 = MRI->createVirtualRegister(TRC); |
| 8451 | BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) |
| 8452 | .addReg(VReg1) |
| 8453 | .addImm(NumLPads >> 16) |
| 8454 | .add(predOps(ARMCC::AL)); |
| 8455 | } |
| 8456 | |
| 8457 | BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) |
| 8458 | .addReg(NewVReg1) |
| 8459 | .addReg(VReg2) |
| 8460 | .add(predOps(ARMCC::AL)); |
| 8461 | } |
| 8462 | |
| 8463 | BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) |
| 8464 | .addMBB(TrapBB) |
| 8465 | .addImm(ARMCC::HI) |
| 8466 | .addReg(ARM::CPSR); |
| 8467 | |
| 8468 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
| 8469 | BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3) |
| 8470 | .addJumpTableIndex(MJTI) |
| 8471 | .add(predOps(ARMCC::AL)); |
| 8472 | |
| 8473 | unsigned NewVReg4 = MRI->createVirtualRegister(TRC); |
| 8474 | BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) |
| 8475 | .addReg(NewVReg3, RegState::Kill) |
| 8476 | .addReg(NewVReg1) |
| 8477 | .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) |
| 8478 | .add(predOps(ARMCC::AL)) |
| 8479 | .add(condCodeOp()); |
| 8480 | |
| 8481 | BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) |
| 8482 | .addReg(NewVReg4, RegState::Kill) |
| 8483 | .addReg(NewVReg1) |
| 8484 | .addJumpTableIndex(MJTI); |
| 8485 | } else if (Subtarget->isThumb()) { |
| 8486 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
| 8487 | BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) |
| 8488 | .addFrameIndex(FI) |
| 8489 | .addImm(1) |
| 8490 | .addMemOperand(FIMMOLd) |
| 8491 | .add(predOps(ARMCC::AL)); |
| 8492 | |
| 8493 | if (NumLPads < 256) { |
| 8494 | BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) |
| 8495 | .addReg(NewVReg1) |
| 8496 | .addImm(NumLPads) |
| 8497 | .add(predOps(ARMCC::AL)); |
| 8498 | } else { |
| 8499 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 8500 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); |
| 8501 | const Constant *C = ConstantInt::get(Int32Ty, NumLPads); |
| 8502 | |
| 8503 | // MachineConstantPool wants an explicit alignment. |
| 8504 | unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); |
| 8505 | if (Align == 0) |
| 8506 | Align = MF->getDataLayout().getTypeAllocSize(C->getType()); |
| 8507 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); |
| 8508 | |
| 8509 | unsigned VReg1 = MRI->createVirtualRegister(TRC); |
| 8510 | BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) |
| 8511 | .addReg(VReg1, RegState::Define) |
| 8512 | .addConstantPoolIndex(Idx) |
| 8513 | .add(predOps(ARMCC::AL)); |
| 8514 | BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) |
| 8515 | .addReg(NewVReg1) |
| 8516 | .addReg(VReg1) |
| 8517 | .add(predOps(ARMCC::AL)); |
| 8518 | } |
| 8519 | |
| 8520 | BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) |
| 8521 | .addMBB(TrapBB) |
| 8522 | .addImm(ARMCC::HI) |
| 8523 | .addReg(ARM::CPSR); |
| 8524 | |
| 8525 | unsigned NewVReg2 = MRI->createVirtualRegister(TRC); |
| 8526 | BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) |
| 8527 | .addReg(ARM::CPSR, RegState::Define) |
| 8528 | .addReg(NewVReg1) |
| 8529 | .addImm(2) |
| 8530 | .add(predOps(ARMCC::AL)); |
| 8531 | |
| 8532 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
| 8533 | BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) |
| 8534 | .addJumpTableIndex(MJTI) |
| 8535 | .add(predOps(ARMCC::AL)); |
| 8536 | |
| 8537 | unsigned NewVReg4 = MRI->createVirtualRegister(TRC); |
| 8538 | BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) |
| 8539 | .addReg(ARM::CPSR, RegState::Define) |
| 8540 | .addReg(NewVReg2, RegState::Kill) |
| 8541 | .addReg(NewVReg3) |
| 8542 | .add(predOps(ARMCC::AL)); |
| 8543 | |
| 8544 | MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( |
| 8545 | MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); |
| 8546 | |
| 8547 | unsigned NewVReg5 = MRI->createVirtualRegister(TRC); |
| 8548 | BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) |
| 8549 | .addReg(NewVReg4, RegState::Kill) |
| 8550 | .addImm(0) |
| 8551 | .addMemOperand(JTMMOLd) |
| 8552 | .add(predOps(ARMCC::AL)); |
| 8553 | |
| 8554 | unsigned NewVReg6 = NewVReg5; |
| 8555 | if (IsPositionIndependent) { |
| 8556 | NewVReg6 = MRI->createVirtualRegister(TRC); |
| 8557 | BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) |
| 8558 | .addReg(ARM::CPSR, RegState::Define) |
| 8559 | .addReg(NewVReg5, RegState::Kill) |
| 8560 | .addReg(NewVReg3) |
| 8561 | .add(predOps(ARMCC::AL)); |
| 8562 | } |
| 8563 | |
| 8564 | BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) |
| 8565 | .addReg(NewVReg6, RegState::Kill) |
| 8566 | .addJumpTableIndex(MJTI); |
| 8567 | } else { |
| 8568 | unsigned NewVReg1 = MRI->createVirtualRegister(TRC); |
| 8569 | BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) |
| 8570 | .addFrameIndex(FI) |
| 8571 | .addImm(4) |
| 8572 | .addMemOperand(FIMMOLd) |
| 8573 | .add(predOps(ARMCC::AL)); |
| 8574 | |
| 8575 | if (NumLPads < 256) { |
| 8576 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) |
| 8577 | .addReg(NewVReg1) |
| 8578 | .addImm(NumLPads) |
| 8579 | .add(predOps(ARMCC::AL)); |
| 8580 | } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { |
| 8581 | unsigned VReg1 = MRI->createVirtualRegister(TRC); |
| 8582 | BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) |
| 8583 | .addImm(NumLPads & 0xFFFF) |
| 8584 | .add(predOps(ARMCC::AL)); |
| 8585 | |
| 8586 | unsigned VReg2 = VReg1; |
| 8587 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 8588 | VReg2 = MRI->createVirtualRegister(TRC); |
| 8589 | BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) |
| 8590 | .addReg(VReg1) |
| 8591 | .addImm(NumLPads >> 16) |
| 8592 | .add(predOps(ARMCC::AL)); |
| 8593 | } |
| 8594 | |
| 8595 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) |
| 8596 | .addReg(NewVReg1) |
| 8597 | .addReg(VReg2) |
| 8598 | .add(predOps(ARMCC::AL)); |
| 8599 | } else { |
| 8600 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 8601 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); |
| 8602 | const Constant *C = ConstantInt::get(Int32Ty, NumLPads); |
| 8603 | |
| 8604 | // MachineConstantPool wants an explicit alignment. |
| 8605 | unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); |
| 8606 | if (Align == 0) |
| 8607 | Align = MF->getDataLayout().getTypeAllocSize(C->getType()); |
| 8608 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); |
| 8609 | |
| 8610 | unsigned VReg1 = MRI->createVirtualRegister(TRC); |
| 8611 | BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) |
| 8612 | .addReg(VReg1, RegState::Define) |
| 8613 | .addConstantPoolIndex(Idx) |
| 8614 | .addImm(0) |
| 8615 | .add(predOps(ARMCC::AL)); |
| 8616 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) |
| 8617 | .addReg(NewVReg1) |
| 8618 | .addReg(VReg1, RegState::Kill) |
| 8619 | .add(predOps(ARMCC::AL)); |
| 8620 | } |
| 8621 | |
| 8622 | BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) |
| 8623 | .addMBB(TrapBB) |
| 8624 | .addImm(ARMCC::HI) |
| 8625 | .addReg(ARM::CPSR); |
| 8626 | |
| 8627 | unsigned NewVReg3 = MRI->createVirtualRegister(TRC); |
| 8628 | BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) |
| 8629 | .addReg(NewVReg1) |
| 8630 | .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) |
| 8631 | .add(predOps(ARMCC::AL)) |
| 8632 | .add(condCodeOp()); |
| 8633 | unsigned NewVReg4 = MRI->createVirtualRegister(TRC); |
| 8634 | BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) |
| 8635 | .addJumpTableIndex(MJTI) |
| 8636 | .add(predOps(ARMCC::AL)); |
| 8637 | |
| 8638 | MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( |
| 8639 | MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); |
| 8640 | unsigned NewVReg5 = MRI->createVirtualRegister(TRC); |
| 8641 | BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) |
| 8642 | .addReg(NewVReg3, RegState::Kill) |
| 8643 | .addReg(NewVReg4) |
| 8644 | .addImm(0) |
| 8645 | .addMemOperand(JTMMOLd) |
| 8646 | .add(predOps(ARMCC::AL)); |
| 8647 | |
| 8648 | if (IsPositionIndependent) { |
| 8649 | BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) |
| 8650 | .addReg(NewVReg5, RegState::Kill) |
| 8651 | .addReg(NewVReg4) |
| 8652 | .addJumpTableIndex(MJTI); |
| 8653 | } else { |
| 8654 | BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) |
| 8655 | .addReg(NewVReg5, RegState::Kill) |
| 8656 | .addJumpTableIndex(MJTI); |
| 8657 | } |
| 8658 | } |
| 8659 | |
| 8660 | // Add the jump table entries as successors to the MBB. |
| 8661 | SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; |
for (MachineBasicBlock *CurMBB : LPadList)
  if (SeenMBBs.insert(CurMBB).second)
    DispContBB->addSuccessor(CurMBB);
| 8668 | |
| 8669 | // N.B. the order the invoke BBs are processed in doesn't matter here. |
| 8670 | const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); |
| 8671 | SmallVector<MachineBasicBlock*, 64> MBBLPads; |
| 8672 | for (MachineBasicBlock *BB : InvokeBBs) { |
| 8673 | |
| 8674 | // Remove the landing pad successor from the invoke block and replace it |
| 8675 | // with the new dispatch block. |
| 8676 | SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), |
| 8677 | BB->succ_end()); |
| 8678 | while (!Successors.empty()) { |
| 8679 | MachineBasicBlock *SMBB = Successors.pop_back_val(); |
| 8680 | if (SMBB->isEHPad()) { |
| 8681 | BB->removeSuccessor(SMBB); |
| 8682 | MBBLPads.push_back(SMBB); |
| 8683 | } |
| 8684 | } |
| 8685 | |
| 8686 | BB->addSuccessor(DispatchBB, BranchProbability::getZero()); |
| 8687 | BB->normalizeSuccProbs(); |
| 8688 | |
// Find the invoke call and mark all of the callee-saved registers as
// implicitly defined so that they're spilled. This prevents later passes
// from moving instructions to before the EH block, where they would never
// be executed.
| 8693 | for (MachineBasicBlock::reverse_iterator |
| 8694 | II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { |
| 8695 | if (!II->isCall()) continue; |
| 8696 | |
| 8697 | DenseMap<unsigned, bool> DefRegs; |
| 8698 | for (MachineInstr::mop_iterator |
| 8699 | OI = II->operands_begin(), OE = II->operands_end(); |
| 8700 | OI != OE; ++OI) { |
| 8701 | if (!OI->isReg()) continue; |
| 8702 | DefRegs[OI->getReg()] = true; |
| 8703 | } |
| 8704 | |
| 8705 | MachineInstrBuilder MIB(*MF, &*II); |
| 8706 | |
| 8707 | for (unsigned i = 0; SavedRegs[i] != 0; ++i) { |
| 8708 | unsigned Reg = SavedRegs[i]; |
| 8709 | if (Subtarget->isThumb2() && |
| 8710 | !ARM::tGPRRegClass.contains(Reg) && |
| 8711 | !ARM::hGPRRegClass.contains(Reg)) |
| 8712 | continue; |
| 8713 | if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) |
| 8714 | continue; |
| 8715 | if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) |
| 8716 | continue; |
| 8717 | if (!DefRegs[Reg]) |
| 8718 | MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); |
| 8719 | } |
| 8720 | |
| 8721 | break; |
| 8722 | } |
| 8723 | } |
| 8724 | |
| 8725 | // Mark all former landing pads as non-landing pads. The dispatch is the only |
| 8726 | // landing pad now. |
for (MachineBasicBlock *MBBLPad : MBBLPads)
  MBBLPad->setIsEHPad(false);
| 8730 | |
| 8731 | // The instruction is gone now. |
| 8732 | MI.eraseFromParent(); |
| 8733 | } |
| 8734 | |
| 8735 | static |
| 8736 | MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { |
| 8737 | for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), |
| 8738 | E = MBB->succ_end(); I != E; ++I) |
| 8739 | if (*I != Succ) |
| 8740 | return *I; |
| 8741 | llvm_unreachable("Expecting a BB with two successors!" ); |
| 8742 | } |
| 8743 | |
/// Return the load opcode for a given load size. If the load size is 8 or
/// more, a NEON opcode is returned.
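/// For example, getLdOpcode(4, /*IsThumb1=*/false, /*IsThumb2=*/true) yields
/// ARM::t2LDR_POST; an unsupported size yields 0, which callers assert on.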
| 8746 | static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { |
| 8747 | if (LdSize >= 8) |
| 8748 | return LdSize == 16 ? ARM::VLD1q32wb_fixed |
| 8749 | : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; |
| 8750 | if (IsThumb1) |
| 8751 | return LdSize == 4 ? ARM::tLDRi |
| 8752 | : LdSize == 2 ? ARM::tLDRHi |
| 8753 | : LdSize == 1 ? ARM::tLDRBi : 0; |
| 8754 | if (IsThumb2) |
| 8755 | return LdSize == 4 ? ARM::t2LDR_POST |
| 8756 | : LdSize == 2 ? ARM::t2LDRH_POST |
| 8757 | : LdSize == 1 ? ARM::t2LDRB_POST : 0; |
| 8758 | return LdSize == 4 ? ARM::LDR_POST_IMM |
| 8759 | : LdSize == 2 ? ARM::LDRH_POST |
| 8760 | : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; |
| 8761 | } |
| 8762 | |
/// Return the store opcode for a given store size. If the store size is 8 or
/// more, a NEON opcode is returned.
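/// For example, getStOpcode(16, false, false) yields ARM::VST1q32wb_fixed;
/// sizes of 8 or more always select a NEON opcode regardless of Thumb mode.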
| 8765 | static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { |
| 8766 | if (StSize >= 8) |
| 8767 | return StSize == 16 ? ARM::VST1q32wb_fixed |
| 8768 | : StSize == 8 ? ARM::VST1d32wb_fixed : 0; |
| 8769 | if (IsThumb1) |
| 8770 | return StSize == 4 ? ARM::tSTRi |
| 8771 | : StSize == 2 ? ARM::tSTRHi |
| 8772 | : StSize == 1 ? ARM::tSTRBi : 0; |
| 8773 | if (IsThumb2) |
| 8774 | return StSize == 4 ? ARM::t2STR_POST |
| 8775 | : StSize == 2 ? ARM::t2STRH_POST |
| 8776 | : StSize == 1 ? ARM::t2STRB_POST : 0; |
| 8777 | return StSize == 4 ? ARM::STR_POST_IMM |
| 8778 | : StSize == 2 ? ARM::STRH_POST |
| 8779 | : StSize == 1 ? ARM::STRB_POST_IMM : 0; |
| 8780 | } |
| 8781 | |
/// Emit a post-increment load operation with the given size. The instructions
/// will be added to BB at Pos.
| 8784 | static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 8785 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 8786 | unsigned LdSize, unsigned Data, unsigned AddrIn, |
| 8787 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 8788 | unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); |
assert(LdOpc != 0 && "Should have a load opcode");
| 8790 | if (LdSize >= 8) { |
| 8791 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
| 8792 | .addReg(AddrOut, RegState::Define) |
| 8793 | .addReg(AddrIn) |
| 8794 | .addImm(0) |
| 8795 | .add(predOps(ARMCC::AL)); |
| 8796 | } else if (IsThumb1) { |
| 8797 | // load + update AddrIn |
| 8798 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
| 8799 | .addReg(AddrIn) |
| 8800 | .addImm(0) |
| 8801 | .add(predOps(ARMCC::AL)); |
| 8802 | BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) |
| 8803 | .add(t1CondCodeOp()) |
| 8804 | .addReg(AddrIn) |
| 8805 | .addImm(LdSize) |
| 8806 | .add(predOps(ARMCC::AL)); |
| 8807 | } else if (IsThumb2) { |
| 8808 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
| 8809 | .addReg(AddrOut, RegState::Define) |
| 8810 | .addReg(AddrIn) |
| 8811 | .addImm(LdSize) |
| 8812 | .add(predOps(ARMCC::AL)); |
| 8813 | } else { // arm |
| 8814 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
| 8815 | .addReg(AddrOut, RegState::Define) |
| 8816 | .addReg(AddrIn) |
| 8817 | .addReg(0) |
| 8818 | .addImm(LdSize) |
| 8819 | .add(predOps(ARMCC::AL)); |
| 8820 | } |
| 8821 | } |
| 8822 | |
/// Emit a post-increment store operation with the given size. The instructions
/// will be added to BB at Pos.
| 8825 | static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 8826 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 8827 | unsigned StSize, unsigned Data, unsigned AddrIn, |
| 8828 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 8829 | unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); |
assert(StOpc != 0 && "Should have a store opcode");
| 8831 | if (StSize >= 8) { |
| 8832 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) |
| 8833 | .addReg(AddrIn) |
| 8834 | .addImm(0) |
| 8835 | .addReg(Data) |
| 8836 | .add(predOps(ARMCC::AL)); |
| 8837 | } else if (IsThumb1) { |
| 8838 | // store + update AddrIn |
| 8839 | BuildMI(*BB, Pos, dl, TII->get(StOpc)) |
| 8840 | .addReg(Data) |
| 8841 | .addReg(AddrIn) |
| 8842 | .addImm(0) |
| 8843 | .add(predOps(ARMCC::AL)); |
| 8844 | BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) |
| 8845 | .add(t1CondCodeOp()) |
| 8846 | .addReg(AddrIn) |
| 8847 | .addImm(StSize) |
| 8848 | .add(predOps(ARMCC::AL)); |
| 8849 | } else if (IsThumb2) { |
| 8850 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) |
| 8851 | .addReg(Data) |
| 8852 | .addReg(AddrIn) |
| 8853 | .addImm(StSize) |
| 8854 | .add(predOps(ARMCC::AL)); |
| 8855 | } else { // arm |
| 8856 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) |
| 8857 | .addReg(Data) |
| 8858 | .addReg(AddrIn) |
| 8859 | .addReg(0) |
| 8860 | .addImm(StSize) |
| 8861 | .add(predOps(ARMCC::AL)); |
| 8862 | } |
| 8863 | } |
| 8864 | |
| 8865 | MachineBasicBlock * |
| 8866 | ARMTargetLowering::EmitStructByval(MachineInstr &MI, |
| 8867 | MachineBasicBlock *BB) const { |
// This pseudo instruction has 4 operands: dst, src, size, alignment.
// We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
// Otherwise, we generate unrolled scalar copies.
| 8871 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 8872 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 8873 | MachineFunction::iterator It = ++BB->getIterator(); |
| 8874 | |
| 8875 | unsigned dest = MI.getOperand(0).getReg(); |
| 8876 | unsigned src = MI.getOperand(1).getReg(); |
| 8877 | unsigned SizeVal = MI.getOperand(2).getImm(); |
| 8878 | unsigned Align = MI.getOperand(3).getImm(); |
| 8879 | DebugLoc dl = MI.getDebugLoc(); |
| 8880 | |
| 8881 | MachineFunction *MF = BB->getParent(); |
| 8882 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 8883 | unsigned UnitSize = 0; |
| 8884 | const TargetRegisterClass *TRC = nullptr; |
| 8885 | const TargetRegisterClass *VecTRC = nullptr; |
| 8886 | |
| 8887 | bool IsThumb1 = Subtarget->isThumb1Only(); |
| 8888 | bool IsThumb2 = Subtarget->isThumb2(); |
| 8889 | bool IsThumb = Subtarget->isThumb(); |
| 8890 | |
| 8891 | if (Align & 1) { |
| 8892 | UnitSize = 1; |
| 8893 | } else if (Align & 2) { |
| 8894 | UnitSize = 2; |
| 8895 | } else { |
| 8896 | // Check whether we can use NEON instructions. |
| 8897 | if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) && |
| 8898 | Subtarget->hasNEON()) { |
| 8899 | if ((Align % 16 == 0) && SizeVal >= 16) |
| 8900 | UnitSize = 16; |
| 8901 | else if ((Align % 8 == 0) && SizeVal >= 8) |
| 8902 | UnitSize = 8; |
| 8903 | } |
| 8904 | // Can't use NEON instructions. |
| 8905 | if (UnitSize == 0) |
| 8906 | UnitSize = 4; |
| 8907 | } |
| 8908 | |
| 8909 | // Select the correct opcode and register class for unit size load/store |
| 8910 | bool IsNeon = UnitSize >= 8; |
| 8911 | TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 8912 | if (IsNeon) |
| 8913 | VecTRC = UnitSize == 16 ? &ARM::DPairRegClass |
| 8914 | : UnitSize == 8 ? &ARM::DPRRegClass |
| 8915 | : nullptr; |
| 8916 | |
| 8917 | unsigned BytesLeft = SizeVal % UnitSize; |
| 8918 | unsigned LoopSize = SizeVal - BytesLeft; |
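// For example, SizeVal == 18 with UnitSize == 8 gives LoopSize == 16 (two
// NEON copies) and BytesLeft == 2 (copied byte-by-byte afterwards).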
| 8919 | |
| 8920 | if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { |
| 8921 | // Use LDR and STR to copy. |
| 8922 | // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) |
| 8923 | // [destOut] = STR_POST(scratch, destIn, UnitSize) |
| 8924 | unsigned srcIn = src; |
| 8925 | unsigned destIn = dest; |
| 8926 | for (unsigned i = 0; i < LoopSize; i+=UnitSize) { |
| 8927 | unsigned srcOut = MRI.createVirtualRegister(TRC); |
| 8928 | unsigned destOut = MRI.createVirtualRegister(TRC); |
| 8929 | unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); |
| 8930 | emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut, |
| 8931 | IsThumb1, IsThumb2); |
| 8932 | emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut, |
| 8933 | IsThumb1, IsThumb2); |
| 8934 | srcIn = srcOut; |
| 8935 | destIn = destOut; |
| 8936 | } |
| 8937 | |
| 8938 | // Handle the leftover bytes with LDRB and STRB. |
| 8939 | // [scratch, srcOut] = LDRB_POST(srcIn, 1) |
| 8940 | // [destOut] = STRB_POST(scratch, destIn, 1) |
| 8941 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 8942 | unsigned srcOut = MRI.createVirtualRegister(TRC); |
| 8943 | unsigned destOut = MRI.createVirtualRegister(TRC); |
| 8944 | unsigned scratch = MRI.createVirtualRegister(TRC); |
| 8945 | emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut, |
| 8946 | IsThumb1, IsThumb2); |
| 8947 | emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut, |
| 8948 | IsThumb1, IsThumb2); |
| 8949 | srcIn = srcOut; |
| 8950 | destIn = destOut; |
| 8951 | } |
| 8952 | MI.eraseFromParent(); // The instruction is gone now. |
| 8953 | return BB; |
| 8954 | } |
| 8955 | |
| 8956 | // Expand the pseudo op to a loop. |
| 8957 | // thisMBB: |
| 8958 | // ... |
| 8959 | // movw varEnd, # --> with thumb2 |
| 8960 | // movt varEnd, # |
| 8961 | // ldrcp varEnd, idx --> without thumb2 |
| 8962 | // fallthrough --> loopMBB |
| 8963 | // loopMBB: |
| 8964 | // PHI varPhi, varEnd, varLoop |
| 8965 | // PHI srcPhi, src, srcLoop |
| 8966 | // PHI destPhi, dst, destLoop |
| 8967 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
| 8968 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) |
| 8969 | // subs varLoop, varPhi, #UnitSize |
| 8970 | // bne loopMBB |
| 8971 | // fallthrough --> exitMBB |
| 8972 | // exitMBB: |
| 8973 | // epilogue to handle left-over bytes |
| 8974 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 8975 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 8976 | MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); |
| 8977 | MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); |
| 8978 | MF->insert(It, loopMBB); |
| 8979 | MF->insert(It, exitMBB); |
| 8980 | |
| 8981 | // Transfer the remainder of BB and its successor edges to exitMBB. |
| 8982 | exitMBB->splice(exitMBB->begin(), BB, |
| 8983 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
| 8984 | exitMBB->transferSuccessorsAndUpdatePHIs(BB); |
| 8985 | |
| 8986 | // Load an immediate to varEnd. |
| 8987 | unsigned varEnd = MRI.createVirtualRegister(TRC); |
| 8988 | if (Subtarget->useMovt()) { |
| 8989 | unsigned Vtmp = varEnd; |
| 8990 | if ((LoopSize & 0xFFFF0000) != 0) |
| 8991 | Vtmp = MRI.createVirtualRegister(TRC); |
| 8992 | BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp) |
| 8993 | .addImm(LoopSize & 0xFFFF) |
| 8994 | .add(predOps(ARMCC::AL)); |
| 8995 | |
| 8996 | if ((LoopSize & 0xFFFF0000) != 0) |
| 8997 | BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd) |
| 8998 | .addReg(Vtmp) |
| 8999 | .addImm(LoopSize >> 16) |
| 9000 | .add(predOps(ARMCC::AL)); |
| 9001 | } else { |
| 9002 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 9003 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); |
| 9004 | const Constant *C = ConstantInt::get(Int32Ty, LoopSize); |
| 9005 | |
| 9006 | // MachineConstantPool wants an explicit alignment. |
| 9007 | unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); |
| 9008 | if (Align == 0) |
| 9009 | Align = MF->getDataLayout().getTypeAllocSize(C->getType()); |
| 9010 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); |
| 9011 | MachineMemOperand *CPMMO = |
| 9012 | MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), |
| 9013 | MachineMemOperand::MOLoad, 4, 4); |
| 9014 | |
| 9015 | if (IsThumb) |
| 9016 | BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)) |
| 9017 | .addReg(varEnd, RegState::Define) |
| 9018 | .addConstantPoolIndex(Idx) |
| 9019 | .add(predOps(ARMCC::AL)) |
| 9020 | .addMemOperand(CPMMO); |
| 9021 | else |
| 9022 | BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)) |
| 9023 | .addReg(varEnd, RegState::Define) |
| 9024 | .addConstantPoolIndex(Idx) |
| 9025 | .addImm(0) |
| 9026 | .add(predOps(ARMCC::AL)) |
| 9027 | .addMemOperand(CPMMO); |
| 9028 | } |
| 9029 | BB->addSuccessor(loopMBB); |
| 9030 | |
| 9031 | // Generate the loop body: |
| 9032 | // varPhi = PHI(varLoop, varEnd) |
| 9033 | // srcPhi = PHI(srcLoop, src) |
| 9034 | // destPhi = PHI(destLoop, dst) |
| 9035 | MachineBasicBlock *entryBB = BB; |
| 9036 | BB = loopMBB; |
| 9037 | unsigned varLoop = MRI.createVirtualRegister(TRC); |
| 9038 | unsigned varPhi = MRI.createVirtualRegister(TRC); |
| 9039 | unsigned srcLoop = MRI.createVirtualRegister(TRC); |
| 9040 | unsigned srcPhi = MRI.createVirtualRegister(TRC); |
| 9041 | unsigned destLoop = MRI.createVirtualRegister(TRC); |
| 9042 | unsigned destPhi = MRI.createVirtualRegister(TRC); |
| 9043 | |
| 9044 | BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi) |
| 9045 | .addReg(varLoop).addMBB(loopMBB) |
| 9046 | .addReg(varEnd).addMBB(entryBB); |
| 9047 | BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi) |
| 9048 | .addReg(srcLoop).addMBB(loopMBB) |
| 9049 | .addReg(src).addMBB(entryBB); |
| 9050 | BuildMI(BB, dl, TII->get(ARM::PHI), destPhi) |
| 9051 | .addReg(destLoop).addMBB(loopMBB) |
| 9052 | .addReg(dest).addMBB(entryBB); |
| 9053 | |
| 9054 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
// [destLoop] = STR_POST(scratch, destPhi, UnitSize)
| 9056 | unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); |
| 9057 | emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop, |
| 9058 | IsThumb1, IsThumb2); |
| 9059 | emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop, |
| 9060 | IsThumb1, IsThumb2); |
| 9061 | |
| 9062 | // Decrement loop variable by UnitSize. |
| 9063 | if (IsThumb1) { |
| 9064 | BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop) |
| 9065 | .add(t1CondCodeOp()) |
| 9066 | .addReg(varPhi) |
| 9067 | .addImm(UnitSize) |
| 9068 | .add(predOps(ARMCC::AL)); |
| 9069 | } else { |
| 9070 | MachineInstrBuilder MIB = |
| 9071 | BuildMI(*BB, BB->end(), dl, |
| 9072 | TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop); |
| 9073 | MIB.addReg(varPhi) |
| 9074 | .addImm(UnitSize) |
| 9075 | .add(predOps(ARMCC::AL)) |
| 9076 | .add(condCodeOp()); |
| 9077 | MIB->getOperand(5).setReg(ARM::CPSR); |
| 9078 | MIB->getOperand(5).setIsDef(true); |
| 9079 | } |
| 9080 | BuildMI(*BB, BB->end(), dl, |
| 9081 | TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 9082 | .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); |
| 9083 | |
| 9084 | // loopMBB can loop back to loopMBB or fall through to exitMBB. |
| 9085 | BB->addSuccessor(loopMBB); |
| 9086 | BB->addSuccessor(exitMBB); |
| 9087 | |
| 9088 | // Add epilogue to handle BytesLeft. |
| 9089 | BB = exitMBB; |
| 9090 | auto StartOfExit = exitMBB->begin(); |
| 9091 | |
| 9092 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 9093 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 9094 | unsigned srcIn = srcLoop; |
| 9095 | unsigned destIn = destLoop; |
| 9096 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 9097 | unsigned srcOut = MRI.createVirtualRegister(TRC); |
| 9098 | unsigned destOut = MRI.createVirtualRegister(TRC); |
| 9099 | unsigned scratch = MRI.createVirtualRegister(TRC); |
| 9100 | emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut, |
| 9101 | IsThumb1, IsThumb2); |
| 9102 | emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut, |
| 9103 | IsThumb1, IsThumb2); |
| 9104 | srcIn = srcOut; |
| 9105 | destIn = destOut; |
| 9106 | } |
| 9107 | |
| 9108 | MI.eraseFromParent(); // The instruction is gone now. |
| 9109 | return BB; |
| 9110 | } |
| 9111 | |
| 9112 | MachineBasicBlock * |
| 9113 | ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI, |
| 9114 | MachineBasicBlock *MBB) const { |
| 9115 | const TargetMachine &TM = getTargetMachine(); |
| 9116 | const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); |
| 9117 | DebugLoc DL = MI.getDebugLoc(); |
| 9118 | |
| 9119 | assert(Subtarget->isTargetWindows() && |
| 9120 | "__chkstk is only supported on Windows" ); |
| 9121 | assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode" ); |
| 9122 | |
// __chkstk takes the number of words to allocate on the stack in R4, and
// returns the stack adjustment in number of bytes in R4. This will not
// clobber any other registers (other than the obvious lr).
| 9126 | // |
| 9127 | // Although, technically, IP should be considered a register which may be |
| 9128 | // clobbered, the call itself will not touch it. Windows on ARM is a pure |
| 9129 | // thumb-2 environment, so there is no interworking required. As a result, we |
| 9130 | // do not expect a veneer to be emitted by the linker, clobbering IP. |
| 9131 | // |
| 9132 | // Each module receives its own copy of __chkstk, so no import thunk is |
| 9133 | // required, again, ensuring that IP is not clobbered. |
| 9134 | // |
| 9135 | // Finally, although some linkers may theoretically provide a trampoline for |
| 9136 | // out of range calls (which is quite common due to a 32M range limitation of |
| 9137 | // branches for Thumb), we can generate the long-call version via |
| 9138 | // -mcmodel=large, alleviating the need for the trampoline which may clobber |
| 9139 | // IP. |
| 9140 | |
| 9141 | switch (TM.getCodeModel()) { |
| 9142 | case CodeModel::Tiny: |
| 9143 | llvm_unreachable("Tiny code model not available on ARM." ); |
| 9144 | case CodeModel::Small: |
| 9145 | case CodeModel::Medium: |
| 9146 | case CodeModel::Kernel: |
| 9147 | BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) |
| 9148 | .add(predOps(ARMCC::AL)) |
| 9149 | .addExternalSymbol("__chkstk" ) |
| 9150 | .addReg(ARM::R4, RegState::Implicit | RegState::Kill) |
| 9151 | .addReg(ARM::R4, RegState::Implicit | RegState::Define) |
| 9152 | .addReg(ARM::R12, |
| 9153 | RegState::Implicit | RegState::Define | RegState::Dead) |
| 9154 | .addReg(ARM::CPSR, |
| 9155 | RegState::Implicit | RegState::Define | RegState::Dead); |
| 9156 | break; |
| 9157 | case CodeModel::Large: { |
| 9158 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
| 9159 | unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); |
| 9160 | |
| 9161 | BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) |
| 9162 | .addExternalSymbol("__chkstk" ); |
| 9163 | BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr)) |
| 9164 | .add(predOps(ARMCC::AL)) |
| 9165 | .addReg(Reg, RegState::Kill) |
| 9166 | .addReg(ARM::R4, RegState::Implicit | RegState::Kill) |
| 9167 | .addReg(ARM::R4, RegState::Implicit | RegState::Define) |
| 9168 | .addReg(ARM::R12, |
| 9169 | RegState::Implicit | RegState::Define | RegState::Dead) |
| 9170 | .addReg(ARM::CPSR, |
| 9171 | RegState::Implicit | RegState::Define | RegState::Dead); |
| 9172 | break; |
| 9173 | } |
| 9174 | } |
| 9175 | |
| 9176 | BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP) |
| 9177 | .addReg(ARM::SP, RegState::Kill) |
| 9178 | .addReg(ARM::R4, RegState::Kill) |
| 9179 | .setMIFlags(MachineInstr::FrameSetup) |
| 9180 | .add(predOps(ARMCC::AL)) |
| 9181 | .add(condCodeOp()); |
| 9182 | |
| 9183 | MI.eraseFromParent(); |
| 9184 | return MBB; |
| 9185 | } |
| 9186 | |
| 9187 | MachineBasicBlock * |
| 9188 | ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, |
| 9189 | MachineBasicBlock *MBB) const { |
| 9190 | DebugLoc DL = MI.getDebugLoc(); |
| 9191 | MachineFunction *MF = MBB->getParent(); |
| 9192 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 9193 | |
| 9194 | MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); |
| 9195 | MF->insert(++MBB->getIterator(), ContBB); |
| 9196 | ContBB->splice(ContBB->begin(), MBB, |
| 9197 | std::next(MachineBasicBlock::iterator(MI)), MBB->end()); |
| 9198 | ContBB->transferSuccessorsAndUpdatePHIs(MBB); |
| 9199 | MBB->addSuccessor(ContBB); |
| 9200 | |
| 9201 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 9202 | BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0)); |
| 9203 | MF->push_back(TrapBB); |
| 9204 | MBB->addSuccessor(TrapBB); |
| 9205 | |
| 9206 | BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8)) |
| 9207 | .addReg(MI.getOperand(0).getReg()) |
| 9208 | .addImm(0) |
| 9209 | .add(predOps(ARMCC::AL)); |
| 9210 | BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc)) |
| 9211 | .addMBB(TrapBB) |
| 9212 | .addImm(ARMCC::EQ) |
| 9213 | .addReg(ARM::CPSR); |
| 9214 | |
| 9215 | MI.eraseFromParent(); |
| 9216 | return ContBB; |
| 9217 | } |
| 9218 | |
| 9219 | // The CPSR operand of SelectItr might be missing a kill marker |
| 9220 | // because there were multiple uses of CPSR, and ISel didn't know |
| 9221 | // which to mark. Figure out whether SelectItr should have had a |
| 9222 | // kill marker, and set it if it should. Returns the correct kill |
| 9223 | // marker value. |
| 9224 | static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, |
| 9225 | MachineBasicBlock* BB, |
| 9226 | const TargetRegisterInfo* TRI) { |
| 9227 | // Scan forward through BB for a use/def of CPSR. |
| 9228 | MachineBasicBlock::iterator miI(std::next(SelectItr)); |
| 9229 | for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { |
| 9230 | const MachineInstr& mi = *miI; |
| 9231 | if (mi.readsRegister(ARM::CPSR)) |
| 9232 | return false; |
| 9233 | if (mi.definesRegister(ARM::CPSR)) |
| 9234 | break; // Should have kill-flag - update below. |
| 9235 | } |
| 9236 | |
| 9237 | // If we hit the end of the block, check whether CPSR is live into a |
| 9238 | // successor. |
| 9239 | if (miI == BB->end()) { |
| 9240 | for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), |
| 9241 | sEnd = BB->succ_end(); |
| 9242 | sItr != sEnd; ++sItr) { |
| 9243 | MachineBasicBlock* succ = *sItr; |
| 9244 | if (succ->isLiveIn(ARM::CPSR)) |
| 9245 | return false; |
| 9246 | } |
| 9247 | } |
| 9248 | |
| 9249 | // We found a def, or hit the end of the basic block and CPSR wasn't live |
| 9250 | // out. SelectMI should have a kill flag on CPSR. |
| 9251 | SelectItr->addRegisterKilled(ARM::CPSR, TRI); |
| 9252 | return true; |
| 9253 | } |
| 9254 | |
| 9255 | MachineBasicBlock * |
| 9256 | ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, |
| 9257 | MachineBasicBlock *BB) const { |
| 9258 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 9259 | DebugLoc dl = MI.getDebugLoc(); |
| 9260 | bool isThumb2 = Subtarget->isThumb2(); |
| 9261 | switch (MI.getOpcode()) { |
| 9262 | default: { |
| 9263 | MI.print(errs()); |
| 9264 | llvm_unreachable("Unexpected instr type to insert" ); |
| 9265 | } |
| 9266 | |
| 9267 | // Thumb1 post-indexed loads are really just single-register LDMs. |
| 9268 | case ARM::tLDR_postidx: { |
| 9269 | MachineOperand Def(MI.getOperand(1)); |
| 9270 | BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) |
| 9271 | .add(Def) // Rn_wb |
| 9272 | .add(MI.getOperand(2)) // Rn |
| 9273 | .add(MI.getOperand(3)) // PredImm |
| 9274 | .add(MI.getOperand(4)) // PredReg |
| 9275 | .add(MI.getOperand(0)) // Rt |
| 9276 | .cloneMemRefs(MI); |
| 9277 | MI.eraseFromParent(); |
| 9278 | return BB; |
| 9279 | } |
| 9280 | |
// The Thumb2 pre-indexed stores have the same MI operands; they just
// define them differently in the .td files from the isel patterns, so
// they need pseudos.
| 9284 | case ARM::t2STR_preidx: |
| 9285 | MI.setDesc(TII->get(ARM::t2STR_PRE)); |
| 9286 | return BB; |
| 9287 | case ARM::t2STRB_preidx: |
| 9288 | MI.setDesc(TII->get(ARM::t2STRB_PRE)); |
| 9289 | return BB; |
| 9290 | case ARM::t2STRH_preidx: |
| 9291 | MI.setDesc(TII->get(ARM::t2STRH_PRE)); |
| 9292 | return BB; |
| 9293 | |
| 9294 | case ARM::STRi_preidx: |
| 9295 | case ARM::STRBi_preidx: { |
| 9296 | unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM |
| 9297 | : ARM::STRB_PRE_IMM; |
| 9298 | // Decode the offset. |
| 9299 | unsigned Offset = MI.getOperand(4).getImm(); |
| 9300 | bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; |
| 9301 | Offset = ARM_AM::getAM2Offset(Offset); |
| 9302 | if (isSub) |
| 9303 | Offset = -Offset; |
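// For example, an addrmode2 immediate that encodes {sub, 4} decodes to
// Offset == -4, the signed immediate form the _PRE_IMM instructions expect.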
| 9304 | |
| 9305 | MachineMemOperand *MMO = *MI.memoperands_begin(); |
| 9306 | BuildMI(*BB, MI, dl, TII->get(NewOpc)) |
| 9307 | .add(MI.getOperand(0)) // Rn_wb |
| 9308 | .add(MI.getOperand(1)) // Rt |
| 9309 | .add(MI.getOperand(2)) // Rn |
| 9310 | .addImm(Offset) // offset (skip GPR==zero_reg) |
| 9311 | .add(MI.getOperand(5)) // pred |
| 9312 | .add(MI.getOperand(6)) |
| 9313 | .addMemOperand(MMO); |
| 9314 | MI.eraseFromParent(); |
| 9315 | return BB; |
| 9316 | } |
| 9317 | case ARM::STRr_preidx: |
| 9318 | case ARM::STRBr_preidx: |
| 9319 | case ARM::STRH_preidx: { |
| 9320 | unsigned NewOpc; |
| 9321 | switch (MI.getOpcode()) { |
default: llvm_unreachable("unexpected opcode!");
| 9323 | case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; |
| 9324 | case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; |
| 9325 | case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; |
| 9326 | } |
| 9327 | MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); |
| 9328 | for (unsigned i = 0; i < MI.getNumOperands(); ++i) |
| 9329 | MIB.add(MI.getOperand(i)); |
| 9330 | MI.eraseFromParent(); |
| 9331 | return BB; |
| 9332 | } |
| 9333 | |
| 9334 | case ARM::tMOVCCr_pseudo: { |
| 9335 | // To "insert" a SELECT_CC instruction, we actually have to insert the |
| 9336 | // diamond control-flow pattern. The incoming instruction knows the |
| 9337 | // destination vreg to set, the condition code register to branch on, the |
| 9338 | // true/false values to select between, and a branch opcode to use. |
| 9339 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 9340 | MachineFunction::iterator It = ++BB->getIterator(); |
| 9341 | |
| 9342 | // thisMBB: |
| 9343 | // ... |
| 9344 | // TrueVal = ... |
| 9345 | // cmpTY ccX, r1, r2 |
| 9346 | // bCC copy1MBB |
| 9347 | // fallthrough --> copy0MBB |
| 9348 | MachineBasicBlock *thisMBB = BB; |
| 9349 | MachineFunction *F = BB->getParent(); |
| 9350 | MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); |
| 9351 | MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); |
| 9352 | F->insert(It, copy0MBB); |
| 9353 | F->insert(It, sinkMBB); |
| 9354 | |
| 9355 | // Check whether CPSR is live past the tMOVCCr_pseudo. |
| 9356 | const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 9357 | if (!MI.killsRegister(ARM::CPSR) && |
| 9358 | !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) { |
| 9359 | copy0MBB->addLiveIn(ARM::CPSR); |
| 9360 | sinkMBB->addLiveIn(ARM::CPSR); |
| 9361 | } |
| 9362 | |
| 9363 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
| 9364 | sinkMBB->splice(sinkMBB->begin(), BB, |
| 9365 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
| 9366 | sinkMBB->transferSuccessorsAndUpdatePHIs(BB); |
| 9367 | |
| 9368 | BB->addSuccessor(copy0MBB); |
| 9369 | BB->addSuccessor(sinkMBB); |
| 9370 | |
| 9371 | BuildMI(BB, dl, TII->get(ARM::tBcc)) |
| 9372 | .addMBB(sinkMBB) |
| 9373 | .addImm(MI.getOperand(3).getImm()) |
| 9374 | .addReg(MI.getOperand(4).getReg()); |
| 9375 | |
| 9376 | // copy0MBB: |
| 9377 | // %FalseValue = ... |
| 9378 | // # fallthrough to sinkMBB |
| 9379 | BB = copy0MBB; |
| 9380 | |
| 9381 | // Update machine-CFG edges |
| 9382 | BB->addSuccessor(sinkMBB); |
| 9383 | |
| 9384 | // sinkMBB: |
| 9385 | // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] |
| 9386 | // ... |
| 9387 | BB = sinkMBB; |
| 9388 | BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg()) |
| 9389 | .addReg(MI.getOperand(1).getReg()) |
| 9390 | .addMBB(copy0MBB) |
| 9391 | .addReg(MI.getOperand(2).getReg()) |
| 9392 | .addMBB(thisMBB); |
| 9393 | |
| 9394 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 9395 | return BB; |
| 9396 | } |
| 9397 | |
| 9398 | case ARM::BCCi64: |
| 9399 | case ARM::BCCZi64: { |
| 9400 | // If there is an unconditional branch to the other successor, remove it. |
| 9401 | BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
| 9402 | |
| 9403 | // Compare both parts that make up the double comparison separately for |
| 9404 | // equality. |
| 9405 | bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; |
| 9406 | |
| 9407 | unsigned LHS1 = MI.getOperand(1).getReg(); |
| 9408 | unsigned LHS2 = MI.getOperand(2).getReg(); |
| 9409 | if (RHSisZero) { |
| 9410 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 9411 | .addReg(LHS1) |
| 9412 | .addImm(0) |
| 9413 | .add(predOps(ARMCC::AL)); |
| 9414 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 9415 | .addReg(LHS2).addImm(0) |
| 9416 | .addImm(ARMCC::EQ).addReg(ARM::CPSR); |
| 9417 | } else { |
| 9418 | unsigned RHS1 = MI.getOperand(3).getReg(); |
| 9419 | unsigned RHS2 = MI.getOperand(4).getReg(); |
| 9420 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 9421 | .addReg(LHS1) |
| 9422 | .addReg(RHS1) |
| 9423 | .add(predOps(ARMCC::AL)); |
| 9424 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 9425 | .addReg(LHS2).addReg(RHS2) |
| 9426 | .addImm(ARMCC::EQ).addReg(ARM::CPSR); |
| 9427 | } |
| 9428 | |
| 9429 | MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB(); |
| 9430 | MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); |
| 9431 | if (MI.getOperand(0).getImm() == ARMCC::NE) |
| 9432 | std::swap(destMBB, exitMBB); |
| 9433 | |
| 9434 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 9435 | .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); |
| 9436 | if (isThumb2) |
| 9437 | BuildMI(BB, dl, TII->get(ARM::t2B)) |
| 9438 | .addMBB(exitMBB) |
| 9439 | .add(predOps(ARMCC::AL)); |
| 9440 | else |
BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);
| 9442 | |
| 9443 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 9444 | return BB; |
| 9445 | } |
| 9446 | |
| 9447 | case ARM::Int_eh_sjlj_setjmp: |
| 9448 | case ARM::Int_eh_sjlj_setjmp_nofp: |
| 9449 | case ARM::tInt_eh_sjlj_setjmp: |
| 9450 | case ARM::t2Int_eh_sjlj_setjmp: |
| 9451 | case ARM::t2Int_eh_sjlj_setjmp_nofp: |
| 9452 | return BB; |
| 9453 | |
| 9454 | case ARM::Int_eh_sjlj_setup_dispatch: |
| 9455 | EmitSjLjDispatchBlock(MI, BB); |
| 9456 | return BB; |
| 9457 | |
| 9458 | case ARM::ABS: |
| 9459 | case ARM::t2ABS: { |
// To insert an ABS instruction, we have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// source vreg to test against 0 and the destination vreg to set.
| 9465 | // It transforms |
| 9466 | // V1 = ABS V0 |
| 9467 | // into |
| 9468 | // V2 = MOVS V0 |
| 9469 | // BCC (branch to SinkBB if V0 >= 0) |
| 9470 | // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) |
| 9471 | // SinkBB: V1 = PHI(V2, V3) |
| 9472 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 9473 | MachineFunction::iterator BBI = ++BB->getIterator(); |
| 9474 | MachineFunction *Fn = BB->getParent(); |
| 9475 | MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); |
| 9476 | MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); |
| 9477 | Fn->insert(BBI, RSBBB); |
| 9478 | Fn->insert(BBI, SinkBB); |
| 9479 | |
| 9480 | unsigned int ABSSrcReg = MI.getOperand(1).getReg(); |
| 9481 | unsigned int ABSDstReg = MI.getOperand(0).getReg(); |
bool ABSSrcKill = MI.getOperand(1).isKill();
| 9483 | bool isThumb2 = Subtarget->isThumb2(); |
| 9484 | MachineRegisterInfo &MRI = Fn->getRegInfo(); |
// In Thumb mode, S must not be specified if the source register is SP or
// PC, or if the destination register is SP, so restrict the register class.
| 9487 | unsigned NewRsbDstReg = |
| 9488 | MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); |
| 9489 | |
| 9490 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
| 9491 | SinkBB->splice(SinkBB->begin(), BB, |
| 9492 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
| 9493 | SinkBB->transferSuccessorsAndUpdatePHIs(BB); |
| 9494 | |
| 9495 | BB->addSuccessor(RSBBB); |
| 9496 | BB->addSuccessor(SinkBB); |
| 9497 | |
| 9498 | // fall through to SinkMBB |
| 9499 | RSBBB->addSuccessor(SinkBB); |
| 9500 | |
| 9501 | // insert a cmp at the end of BB |
| 9502 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 9503 | .addReg(ABSSrcReg) |
| 9504 | .addImm(0) |
| 9505 | .add(predOps(ARMCC::AL)); |
| 9506 | |
| 9507 | // insert a bcc with opposite CC to ARMCC::MI at the end of BB |
| 9508 | BuildMI(BB, dl, |
| 9509 | TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) |
| 9510 | .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); |
| 9511 | |
| 9512 | // insert rsbri in RSBBB |
// Note: BCC and rsbri will be converted into predicated rsbmi
// by the if-conversion pass.
| 9515 | BuildMI(*RSBBB, RSBBB->begin(), dl, |
| 9516 | TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) |
.addReg(ABSSrcReg, ABSSrcKill ? RegState::Kill : 0)
| 9518 | .addImm(0) |
| 9519 | .add(predOps(ARMCC::AL)) |
| 9520 | .add(condCodeOp()); |
| 9521 | |
| 9522 | // insert PHI in SinkBB, |
| 9523 | // reuse ABSDstReg to not change uses of ABS instruction |
| 9524 | BuildMI(*SinkBB, SinkBB->begin(), dl, |
| 9525 | TII->get(ARM::PHI), ABSDstReg) |
| 9526 | .addReg(NewRsbDstReg).addMBB(RSBBB) |
| 9527 | .addReg(ABSSrcReg).addMBB(BB); |
| 9528 | |
| 9529 | // remove ABS instruction |
| 9530 | MI.eraseFromParent(); |
| 9531 | |
| 9532 | // return last added BB |
| 9533 | return SinkBB; |
| 9534 | } |
| 9535 | case ARM::COPY_STRUCT_BYVAL_I32: |
| 9536 | ++NumLoopByVals; |
| 9537 | return EmitStructByval(MI, BB); |
| 9538 | case ARM::WIN__CHKSTK: |
| 9539 | return EmitLowered__chkstk(MI, BB); |
| 9540 | case ARM::WIN__DBZCHK: |
| 9541 | return EmitLowered__dbzchk(MI, BB); |
| 9542 | } |
| 9543 | } |
| 9544 | |
| 9545 | /// Attaches vregs to MEMCPY that it will use as scratch registers |
| 9546 | /// when it is expanded into LDM/STM. This is done as a post-isel lowering |
| 9547 | /// instead of as a custom inserter because we need the use list from the SDNode. |
| 9548 | static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, |
| 9549 | MachineInstr &MI, const SDNode *Node) { |
| 9550 | bool isThumb1 = Subtarget->isThumb1Only(); |
| 9551 | |
| 9552 | DebugLoc DL = MI.getDebugLoc(); |
| 9553 | MachineFunction *MF = MI.getParent()->getParent(); |
| 9554 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 9555 | MachineInstrBuilder MIB(*MF, MI); |
| 9556 | |
// If the new dst/src is unused, mark it as dead.
| 9558 | if (!Node->hasAnyUseOfValue(0)) { |
| 9559 | MI.getOperand(0).setIsDead(true); |
| 9560 | } |
| 9561 | if (!Node->hasAnyUseOfValue(1)) { |
| 9562 | MI.getOperand(1).setIsDead(true); |
| 9563 | } |
| 9564 | |
| 9565 | // The MEMCPY both defines and kills the scratch registers. |
| 9566 | for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) { |
| 9567 | unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass |
| 9568 | : &ARM::GPRRegClass); |
| 9569 | MIB.addReg(TmpReg, RegState::Define|RegState::Dead); |
| 9570 | } |
| 9571 | } |
| 9572 | |
| 9573 | void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
| 9574 | SDNode *Node) const { |
| 9575 | if (MI.getOpcode() == ARM::MEMCPY) { |
| 9576 | attachMEMCPYScratchRegs(Subtarget, MI, Node); |
| 9577 | return; |
| 9578 | } |
| 9579 | |
| 9580 | const MCInstrDesc *MCID = &MI.getDesc(); |
| 9581 | // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, |
| 9582 | // RSC. Coming out of isel, they have an implicit CPSR def, but the optional |
| 9583 | // operand is still set to noreg. If needed, set the optional operand's |
| 9584 | // register to CPSR, and remove the redundant implicit def. |
| 9585 | // |
| 9586 | // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR). |
| 9587 | |
| 9588 | // Rename pseudo opcodes. |
| 9589 | unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode()); |
| 9590 | unsigned ccOutIdx; |
| 9591 | if (NewOpc) { |
| 9592 | const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); |
| 9593 | MCID = &TII->get(NewOpc); |
| 9594 | |
| 9595 | assert(MCID->getNumOperands() == |
| 9596 | MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize() |
| 9597 | && "converted opcode should be the same except for cc_out" |
| 9598 | " (and, on Thumb1, pred)" ); |
| 9599 | |
| 9600 | MI.setDesc(*MCID); |
| 9601 | |
| 9602 | // Add the optional cc_out operand |
| 9603 | MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); |
| 9604 | |
| 9605 | // On Thumb1, move all input operands to the end, then add the predicate |
| 9606 | if (Subtarget->isThumb1Only()) { |
| 9607 | for (unsigned c = MCID->getNumOperands() - 4; c--;) { |
| 9608 | MI.addOperand(MI.getOperand(1)); |
| 9609 | MI.RemoveOperand(1); |
| 9610 | } |
| 9611 | |
| 9612 | // Restore the ties |
| 9613 | for (unsigned i = MI.getNumOperands(); i--;) { |
| 9614 | const MachineOperand& op = MI.getOperand(i); |
| 9615 | if (op.isReg() && op.isUse()) { |
| 9616 | int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO); |
| 9617 | if (DefIdx != -1) |
| 9618 | MI.tieOperands(DefIdx, i); |
| 9619 | } |
| 9620 | } |
| 9621 | |
| 9622 | MI.addOperand(MachineOperand::CreateImm(ARMCC::AL)); |
| 9623 | MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false)); |
| 9624 | ccOutIdx = 1; |
| 9625 | } else |
| 9626 | ccOutIdx = MCID->getNumOperands() - 1; |
| 9627 | } else |
| 9628 | ccOutIdx = MCID->getNumOperands() - 1; |
| 9629 | |
| 9630 | // Any ARM instruction that sets the 's' bit should specify an optional |
| 9631 | // "cc_out" operand in the last operand position. |
| 9632 | if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { |
assert(!NewOpc && "Optional cc_out operand required");
| 9634 | return; |
| 9635 | } |
| 9636 | // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it |
| 9637 | // since we already have an optional CPSR def. |
| 9638 | bool definesCPSR = false; |
| 9639 | bool deadCPSR = false; |
| 9640 | for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; |
| 9641 | ++i) { |
| 9642 | const MachineOperand &MO = MI.getOperand(i); |
| 9643 | if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { |
| 9644 | definesCPSR = true; |
| 9645 | if (MO.isDead()) |
| 9646 | deadCPSR = true; |
| 9647 | MI.RemoveOperand(i); |
| 9648 | break; |
| 9649 | } |
| 9650 | } |
| 9651 | if (!definesCPSR) { |
assert(!NewOpc && "Optional cc_out operand required");
| 9653 | return; |
| 9654 | } |
assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
| 9656 | if (deadCPSR) { |
| 9657 | assert(!MI.getOperand(ccOutIdx).getReg() && |
| 9658 | "expect uninitialized optional cc_out operand" ); |
| 9659 | // Thumb1 instructions must have the S bit even if the CPSR is dead. |
| 9660 | if (!Subtarget->isThumb1Only()) |
| 9661 | return; |
| 9662 | } |
| 9663 | |
| 9664 | // If this instruction was defined with an optional CPSR def and its dag node |
| 9665 | // had a live implicit CPSR def, then activate the optional CPSR def. |
| 9666 | MachineOperand &MO = MI.getOperand(ccOutIdx); |
| 9667 | MO.setReg(ARM::CPSR); |
| 9668 | MO.setIsDef(true); |
| 9669 | } |
| 9670 | |
| 9671 | //===----------------------------------------------------------------------===// |
| 9672 | // ARM Optimization Hooks |
| 9673 | //===----------------------------------------------------------------------===// |
| 9674 | |
| 9675 | // Helper function that checks if N is a null or all ones constant. |
| 9676 | static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { |
| 9677 | return AllOnes ? isAllOnesConstant(N) : isNullConstant(N); |
| 9678 | } |
| 9679 | |
| 9680 | // Return true if N is conditionally 0 or all ones. |
| 9681 | // Detects these expressions where cc is an i1 value: |
| 9682 | // |
| 9683 | // (select cc 0, y) [AllOnes=0] |
| 9684 | // (select cc y, 0) [AllOnes=0] |
| 9685 | // (zext cc) [AllOnes=0] |
| 9686 | // (sext cc) [AllOnes=0/1] |
| 9687 | // (select cc -1, y) [AllOnes=1] |
| 9688 | // (select cc y, -1) [AllOnes=1] |
| 9689 | // |
// Invert is set when N is the null/all-ones constant for the CC-false case.
// OtherOp is set to the alternative value of N.
| 9692 | static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, |
| 9693 | SDValue &CC, bool &Invert, |
| 9694 | SDValue &OtherOp, |
| 9695 | SelectionDAG &DAG) { |
| 9696 | switch (N->getOpcode()) { |
| 9697 | default: return false; |
| 9698 | case ISD::SELECT: { |
| 9699 | CC = N->getOperand(0); |
| 9700 | SDValue N1 = N->getOperand(1); |
| 9701 | SDValue N2 = N->getOperand(2); |
| 9702 | if (isZeroOrAllOnes(N1, AllOnes)) { |
| 9703 | Invert = false; |
| 9704 | OtherOp = N2; |
| 9705 | return true; |
| 9706 | } |
| 9707 | if (isZeroOrAllOnes(N2, AllOnes)) { |
| 9708 | Invert = true; |
| 9709 | OtherOp = N1; |
| 9710 | return true; |
| 9711 | } |
| 9712 | return false; |
| 9713 | } |
| 9714 | case ISD::ZERO_EXTEND: |
| 9715 | // (zext cc) can never be the all ones value. |
| 9716 | if (AllOnes) |
| 9717 | return false; |
| 9718 | LLVM_FALLTHROUGH; |
| 9719 | case ISD::SIGN_EXTEND: { |
| 9720 | SDLoc dl(N); |
| 9721 | EVT VT = N->getValueType(0); |
| 9722 | CC = N->getOperand(0); |
| 9723 | if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) |
| 9724 | return false; |
| 9725 | Invert = !AllOnes; |
| 9726 | if (AllOnes) |
| 9727 | // When looking for an AllOnes constant, N is an sext, and the 'other' |
| 9728 | // value is 0. |
| 9729 | OtherOp = DAG.getConstant(0, dl, VT); |
| 9730 | else if (N->getOpcode() == ISD::ZERO_EXTEND) |
| 9731 | // When looking for a 0 constant, N can be zext or sext. |
| 9732 | OtherOp = DAG.getConstant(1, dl, VT); |
| 9733 | else |
| 9734 | OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, |
| 9735 | VT); |
| 9736 | return true; |
| 9737 | } |
| 9738 | } |
| 9739 | } |
| 9740 | |
| 9741 | // Combine a constant select operand into its use: |
| 9742 | // |
| 9743 | // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) |
| 9744 | // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) |
| 9745 | // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] |
| 9746 | // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) |
| 9747 | // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) |
| 9748 | // |
| 9749 | // The transform is rejected if the select doesn't have a constant operand that |
| 9750 | // is null, or all ones when AllOnes is set. |
| 9751 | // |
| 9752 | // Also recognize sext/zext from i1: |
| 9753 | // |
| 9754 | // (add (zext cc), x) -> (select cc (add x, 1), x) |
| 9755 | // (add (sext cc), x) -> (select cc (add x, -1), x) |
| 9756 | // |
| 9757 | // These transformations eventually create predicated instructions. |
| 9758 | // |
| 9759 | // @param N The node to transform. |
| 9760 | // @param Slct The N operand that is a select. |
| 9761 | // @param OtherOp The other N operand (x above). |
| 9762 | // @param DCI Context. |
| 9763 | // @param AllOnes Require the select constant to be all ones instead of null. |
| 9764 | // @returns The new node, or SDValue() on failure. |
| 9765 | static |
| 9766 | SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, |
| 9767 | TargetLowering::DAGCombinerInfo &DCI, |
| 9768 | bool AllOnes = false) { |
| 9769 | SelectionDAG &DAG = DCI.DAG; |
| 9770 | EVT VT = N->getValueType(0); |
| 9771 | SDValue NonConstantVal; |
| 9772 | SDValue CCOp; |
| 9773 | bool SwapSelectOps; |
| 9774 | if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, |
| 9775 | NonConstantVal, DAG)) |
| 9776 | return SDValue(); |
| 9777 | |
// Slct is now known to be the desired identity constant when CC is true.
| 9779 | SDValue TrueVal = OtherOp; |
| 9780 | SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, |
| 9781 | OtherOp, NonConstantVal); |
| 9782 | // Unless SwapSelectOps says CC should be false. |
| 9783 | if (SwapSelectOps) |
| 9784 | std::swap(TrueVal, FalseVal); |
| 9785 | |
| 9786 | return DAG.getNode(ISD::SELECT, SDLoc(N), VT, |
| 9787 | CCOp, TrueVal, FalseVal); |
| 9788 | } |
| 9789 | |
| 9790 | // Attempt combineSelectAndUse on each operand of a commutative operator N. |
| 9791 | static |
| 9792 | SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, |
| 9793 | TargetLowering::DAGCombinerInfo &DCI) { |
| 9794 | SDValue N0 = N->getOperand(0); |
| 9795 | SDValue N1 = N->getOperand(1); |
| 9796 | if (N0.getNode()->hasOneUse()) |
| 9797 | if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes)) |
| 9798 | return Result; |
| 9799 | if (N1.getNode()->hasOneUse()) |
| 9800 | if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes)) |
| 9801 | return Result; |
| 9802 | return SDValue(); |
| 9803 | } |
| 9804 | |
| 9805 | static bool IsVUZPShuffleNode(SDNode *N) { |
| 9806 | // VUZP shuffle node. |
| 9807 | if (N->getOpcode() == ARMISD::VUZP) |
| 9808 | return true; |
| 9809 | |
| 9810 | // "VUZP" on i32 is an alias for VTRN. |
| 9811 | if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32) |
| 9812 | return true; |
| 9813 | |
| 9814 | return false; |
| 9815 | } |
| 9816 | |
| 9817 | static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, |
| 9818 | TargetLowering::DAGCombinerInfo &DCI, |
| 9819 | const ARMSubtarget *Subtarget) { |
| 9820 | // Look for ADD(VUZP.0, VUZP.1). |
| 9821 | if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() || |
| 9822 | N0 == N1) |
| 9823 | return SDValue(); |
| 9824 | |
| 9825 | // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD. |
| 9826 | if (!N->getValueType(0).is64BitVector()) |
| 9827 | return SDValue(); |
| 9828 | |
| 9829 | // Generate vpadd. |
| 9830 | SelectionDAG &DAG = DCI.DAG; |
| 9831 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 9832 | SDLoc dl(N); |
| 9833 | SDNode *Unzip = N0.getNode(); |
| 9834 | EVT VT = N->getValueType(0); |
| 9835 | |
| 9836 | SmallVector<SDValue, 8> Ops; |
| 9837 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl, |
| 9838 | TLI.getPointerTy(DAG.getDataLayout()))); |
| 9839 | Ops.push_back(Unzip->getOperand(0)); |
| 9840 | Ops.push_back(Unzip->getOperand(1)); |
| 9841 | |
| 9842 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); |
| 9843 | } |
| 9844 | |
| 9845 | static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 9846 | TargetLowering::DAGCombinerInfo &DCI, |
| 9847 | const ARMSubtarget *Subtarget) { |
| 9848 | // Check for two extended operands. |
| 9849 | if (!(N0.getOpcode() == ISD::SIGN_EXTEND && |
| 9850 | N1.getOpcode() == ISD::SIGN_EXTEND) && |
| 9851 | !(N0.getOpcode() == ISD::ZERO_EXTEND && |
| 9852 | N1.getOpcode() == ISD::ZERO_EXTEND)) |
| 9853 | return SDValue(); |
| 9854 | |
| 9855 | SDValue N00 = N0.getOperand(0); |
| 9856 | SDValue N10 = N1.getOperand(0); |
| 9857 | |
| 9858 | // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) |
| 9859 | if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() || |
| 9860 | N00 == N10) |
| 9861 | return SDValue(); |
| 9862 | |
| 9863 | // We only recognize Q register paddl here; this can't be reached until |
| 9864 | // after type legalization. |
| 9865 | if (!N00.getValueType().is64BitVector() || |
| 9866 | !N0.getValueType().is128BitVector()) |
| 9867 | return SDValue(); |
| 9868 | |
| 9869 | // Generate vpaddl. |
| 9870 | SelectionDAG &DAG = DCI.DAG; |
| 9871 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 9872 | SDLoc dl(N); |
| 9873 | EVT VT = N->getValueType(0); |
| 9874 | |
| 9875 | SmallVector<SDValue, 8> Ops; |
| 9876 | // Form vpaddl.sN or vpaddl.uN depending on the kind of extension. |
| 9877 | unsigned Opcode; |
| 9878 | if (N0.getOpcode() == ISD::SIGN_EXTEND) |
| 9879 | Opcode = Intrinsic::arm_neon_vpaddls; |
| 9880 | else |
| 9881 | Opcode = Intrinsic::arm_neon_vpaddlu; |
| 9882 | Ops.push_back(DAG.getConstant(Opcode, dl, |
| 9883 | TLI.getPointerTy(DAG.getDataLayout()))); |
| 9884 | EVT ElemTy = N00.getValueType().getVectorElementType(); |
| 9885 | unsigned NumElts = VT.getVectorNumElements(); |
| 9886 | EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2); |
| 9887 | SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT, |
| 9888 | N00.getOperand(0), N00.getOperand(1)); |
| 9889 | Ops.push_back(Concat); |
| 9890 | |
| 9891 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); |
| 9892 | } |
| 9893 | |
| 9894 | // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in |
| 9895 | // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is |
| 9896 | // much easier to match. |
| 9897 | static SDValue |
| 9898 | AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 9899 | TargetLowering::DAGCombinerInfo &DCI, |
| 9900 | const ARMSubtarget *Subtarget) { |
// Only perform the optimization after legalization, and only if NEON is
// available. We also expect both operands to be BUILD_VECTORs.
| 9903 | if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() |
| 9904 | || N0.getOpcode() != ISD::BUILD_VECTOR |
| 9905 | || N1.getOpcode() != ISD::BUILD_VECTOR) |
| 9906 | return SDValue(); |
| 9907 | |
| 9908 | // Check output type since VPADDL operand elements can only be 8, 16, or 32. |
| 9909 | EVT VT = N->getValueType(0); |
| 9910 | if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) |
| 9911 | return SDValue(); |
| 9912 | |
// Check that the vector operands are of the right form.
// N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR_ELT operands,
// where N is the size of the formed vector.
// Each EXTRACT_VECTOR_ELT should read the same input vector at an even or
// odd index such that we have a pairwise add pattern.
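// For example, with Vec of type v8i8 the matching shape is
//   N0 = BUILD_VECTOR Vec[0], Vec[2], Vec[4], Vec[6]
//   N1 = BUILD_VECTOR Vec[1], Vec[3], Vec[5], Vec[7]
// so ADD(N0, N1) sums adjacent lanes of Vec, which is what vpaddl computes
// (with widened result lanes).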
| 9918 | |
| 9919 | // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. |
| 9920 | if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 9921 | return SDValue(); |
| 9922 | SDValue Vec = N0->getOperand(0)->getOperand(0); |
| 9923 | SDNode *V = Vec.getNode(); |
| 9924 | unsigned nextIndex = 0; |
| 9925 | |
// For each operand of the ADD that is a BUILD_VECTOR,
// check that each of its operands is an EXTRACT_VECTOR_ELT with
// the same input vector and an appropriate index.
| 9929 | for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { |
| 9930 | if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT |
| 9931 | && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 9932 | |
| 9933 | SDValue ExtVec0 = N0->getOperand(i); |
| 9934 | SDValue ExtVec1 = N1->getOperand(i); |
| 9935 | |
// First operand is the vector; verify it's the same.
| 9937 | if (V != ExtVec0->getOperand(0).getNode() || |
| 9938 | V != ExtVec1->getOperand(0).getNode()) |
| 9939 | return SDValue(); |
| 9940 | |
// Second is the constant index; verify it's correct.
| 9942 | ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); |
| 9943 | ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); |
| 9944 | |
// For the constants, N0 should extract the even index (nextIndex) and N1
// the odd index that follows it.
| 9946 | if (!C0 || !C1 || C0->getZExtValue() != nextIndex |
| 9947 | || C1->getZExtValue() != nextIndex+1) |
| 9948 | return SDValue(); |
| 9949 | |
      // Advance to the next pair of indices.
      nextIndex += 2;
| 9952 | } else |
| 9953 | return SDValue(); |
| 9954 | } |
| 9955 | |
| 9956 | // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure |
| 9957 | // we're using the entire input vector, otherwise there's a size/legality |
| 9958 | // mismatch somewhere. |
| 9959 | if (nextIndex != Vec.getValueType().getVectorNumElements() || |
| 9960 | Vec.getValueType().getVectorElementType() == VT.getVectorElementType()) |
| 9961 | return SDValue(); |
| 9962 | |
| 9963 | // Create VPADDL node. |
| 9964 | SelectionDAG &DAG = DCI.DAG; |
| 9965 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 9966 | |
| 9967 | SDLoc dl(N); |
| 9968 | |
| 9969 | // Build operand list. |
| 9970 | SmallVector<SDValue, 8> Ops; |
| 9971 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl, |
| 9972 | TLI.getPointerTy(DAG.getDataLayout()))); |
| 9973 | |
| 9974 | // Input is the vector. |
| 9975 | Ops.push_back(Vec); |
| 9976 | |
| 9977 | // Get widened type and narrowed type. |
| 9978 | MVT widenType; |
| 9979 | unsigned numElem = VT.getVectorNumElements(); |
| 9980 | |
| 9981 | EVT inputLaneType = Vec.getValueType().getVectorElementType(); |
| 9982 | switch (inputLaneType.getSimpleVT().SimpleTy) { |
| 9983 | case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; |
| 9984 | case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; |
| 9985 | case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; |
| 9986 | default: |
| 9987 | llvm_unreachable("Invalid vector element type for padd optimization." ); |
| 9988 | } |
| 9989 | |
| 9990 | SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops); |
  unsigned ExtOp =
      VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
| 9992 | return DAG.getNode(ExtOp, dl, VT, tmp); |
| 9993 | } |
| 9994 | |
| 9995 | static SDValue findMUL_LOHI(SDValue V) { |
| 9996 | if (V->getOpcode() == ISD::UMUL_LOHI || |
| 9997 | V->getOpcode() == ISD::SMUL_LOHI) |
| 9998 | return V; |
| 9999 | return SDValue(); |
| 10000 | } |
| 10001 | |
| 10002 | static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, |
| 10003 | TargetLowering::DAGCombinerInfo &DCI, |
| 10004 | const ARMSubtarget *Subtarget) { |
| 10005 | if (Subtarget->isThumb()) { |
| 10006 | if (!Subtarget->hasDSP()) |
| 10007 | return SDValue(); |
| 10008 | } else if (!Subtarget->hasV5TEOps()) |
| 10009 | return SDValue(); |
| 10010 | |
  // SMLALBB, SMLALBT, SMLALTB, and SMLALTT multiply two 16-bit values and
  // accumulate the product into a 64-bit value. The 16-bit values will
  // be sign-extended somehow or SRA'd into 32-bit values.
  // (addc (adde (mul 16bit, 16bit), lo), hi)
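  // For example (a sketch; sext16(x) denotes a value known to be
  // sign-extended from 16 bits):
  //   Mul = (mul sext16(a), sext16(b))
  //   (adde (sra Mul, 31), hi) glued to (addc Mul, lo)
  //   => (SMLALBB a, b, lo, hi)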
| 10015 | SDValue Mul = AddcNode->getOperand(0); |
| 10016 | SDValue Lo = AddcNode->getOperand(1); |
| 10017 | if (Mul.getOpcode() != ISD::MUL) { |
| 10018 | Lo = AddcNode->getOperand(0); |
| 10019 | Mul = AddcNode->getOperand(1); |
| 10020 | if (Mul.getOpcode() != ISD::MUL) |
| 10021 | return SDValue(); |
| 10022 | } |
| 10023 | |
| 10024 | SDValue SRA = AddeNode->getOperand(0); |
| 10025 | SDValue Hi = AddeNode->getOperand(1); |
| 10026 | if (SRA.getOpcode() != ISD::SRA) { |
| 10027 | SRA = AddeNode->getOperand(1); |
| 10028 | Hi = AddeNode->getOperand(0); |
| 10029 | if (SRA.getOpcode() != ISD::SRA) |
| 10030 | return SDValue(); |
| 10031 | } |
| 10032 | if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) { |
| 10033 | if (Const->getZExtValue() != 31) |
| 10034 | return SDValue(); |
| 10035 | } else |
| 10036 | return SDValue(); |
| 10037 | |
| 10038 | if (SRA.getOperand(0) != Mul) |
| 10039 | return SDValue(); |
| 10040 | |
| 10041 | SelectionDAG &DAG = DCI.DAG; |
| 10042 | SDLoc dl(AddcNode); |
| 10043 | unsigned Opcode = 0; |
| 10044 | SDValue Op0; |
| 10045 | SDValue Op1; |
| 10046 | |
| 10047 | if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) { |
| 10048 | Opcode = ARMISD::SMLALBB; |
| 10049 | Op0 = Mul.getOperand(0); |
| 10050 | Op1 = Mul.getOperand(1); |
| 10051 | } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) { |
| 10052 | Opcode = ARMISD::SMLALBT; |
| 10053 | Op0 = Mul.getOperand(0); |
| 10054 | Op1 = Mul.getOperand(1).getOperand(0); |
| 10055 | } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) { |
| 10056 | Opcode = ARMISD::SMLALTB; |
| 10057 | Op0 = Mul.getOperand(0).getOperand(0); |
| 10058 | Op1 = Mul.getOperand(1); |
| 10059 | } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) { |
| 10060 | Opcode = ARMISD::SMLALTT; |
| 10061 | Op0 = Mul->getOperand(0).getOperand(0); |
| 10062 | Op1 = Mul->getOperand(1).getOperand(0); |
| 10063 | } |
| 10064 | |
| 10065 | if (!Op0 || !Op1) |
| 10066 | return SDValue(); |
| 10067 | |
| 10068 | SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), |
| 10069 | Op0, Op1, Lo, Hi); |
  // Replace the ADD nodes' uses with the MLAL node's values.
| 10071 | SDValue HiMLALResult(SMLAL.getNode(), 1); |
| 10072 | SDValue LoMLALResult(SMLAL.getNode(), 0); |
| 10073 | |
| 10074 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); |
| 10075 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); |
| 10076 | |
| 10077 | // Return original node to notify the driver to stop replacing. |
| 10078 | SDValue resNode(AddcNode, 0); |
| 10079 | return resNode; |
| 10080 | } |
| 10081 | |
| 10082 | static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, |
| 10083 | TargetLowering::DAGCombinerInfo &DCI, |
| 10084 | const ARMSubtarget *Subtarget) { |
| 10085 | // Look for multiply add opportunities. |
  // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
  // each add node consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
| 10089 | // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by |
| 10090 | // a S/UMLAL instruction. |
  //                  UMUL_LOHI
  //                 / :lo    \ :hi
  //                V          \          [no multiline comment]
  //   loAdd ->  ADDC           |
  //                \  :carry  /
  //                 V        V
  //                   ADDE <- hiAdd
| 10098 | // |
  // In the special case where only the higher part of a signed result is
  // used and the add to the low part of the result of ISD::UMUL_LOHI adds
  // or subtracts a constant with the exact value of 0x80000000, we recognize
  // we are dealing with a "rounded multiply and add" (or subtract) and
  // transform it into either a ARMISD::SMMLAR or ARMISD::SMMLSR respectively.
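  // I.e. (a sketch): adding 0x80000000 into the low half of the 64-bit
  // result and keeping only the high half rounds the value to the nearest
  // multiple of 2^32, which is exactly the behaviour of SMMLAR/SMMLSR.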
| 10104 | |
| 10105 | assert((AddeSubeNode->getOpcode() == ARMISD::ADDE || |
| 10106 | AddeSubeNode->getOpcode() == ARMISD::SUBE) && |
| 10107 | "Expect an ADDE or SUBE" ); |
| 10108 | |
| 10109 | assert(AddeSubeNode->getNumOperands() == 3 && |
| 10110 | AddeSubeNode->getOperand(2).getValueType() == MVT::i32 && |
| 10111 | "ADDE node has the wrong inputs" ); |
| 10112 | |
| 10113 | // Check that we are chained to the right ADDC or SUBC node. |
| 10114 | SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode(); |
| 10115 | if ((AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 10116 | AddcSubcNode->getOpcode() != ARMISD::ADDC) || |
| 10117 | (AddeSubeNode->getOpcode() == ARMISD::SUBE && |
| 10118 | AddcSubcNode->getOpcode() != ARMISD::SUBC)) |
| 10119 | return SDValue(); |
| 10120 | |
| 10121 | SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0); |
| 10122 | SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1); |
| 10123 | |
| 10124 | // Check if the two operands are from the same mul_lohi node. |
| 10125 | if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode()) |
| 10126 | return SDValue(); |
| 10127 | |
| 10128 | assert(AddcSubcNode->getNumValues() == 2 && |
| 10129 | AddcSubcNode->getValueType(0) == MVT::i32 && |
| 10130 | "Expect ADDC with two result values. First: i32" ); |
| 10131 | |
  // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
  // may be an SMLAL which multiplies two 16-bit values.
| 10134 | if (AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 10135 | AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI && |
| 10136 | AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI && |
| 10137 | AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI && |
| 10138 | AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI) |
| 10139 | return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget); |
| 10140 | |
| 10141 | // Check for the triangle shape. |
| 10142 | SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0); |
| 10143 | SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1); |
| 10144 | |
| 10145 | // Make sure that the ADDE/SUBE operands are not coming from the same node. |
| 10146 | if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode()) |
| 10147 | return SDValue(); |
| 10148 | |
| 10149 | // Find the MUL_LOHI node walking up ADDE/SUBE's operands. |
| 10150 | bool IsLeftOperandMUL = false; |
| 10151 | SDValue MULOp = findMUL_LOHI(AddeSubeOp0); |
| 10152 | if (MULOp == SDValue()) |
| 10153 | MULOp = findMUL_LOHI(AddeSubeOp1); |
| 10154 | else |
| 10155 | IsLeftOperandMUL = true; |
| 10156 | if (MULOp == SDValue()) |
| 10157 | return SDValue(); |
| 10158 | |
| 10159 | // Figure out the right opcode. |
| 10160 | unsigned Opc = MULOp->getOpcode(); |
| 10161 | unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; |
| 10162 | |
| 10163 | // Figure out the high and low input values to the MLAL node. |
| 10164 | SDValue *HiAddSub = nullptr; |
| 10165 | SDValue *LoMul = nullptr; |
| 10166 | SDValue *LowAddSub = nullptr; |
| 10167 | |
| 10168 | // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI. |
| 10169 | if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1))) |
| 10170 | return SDValue(); |
| 10171 | |
| 10172 | if (IsLeftOperandMUL) |
| 10173 | HiAddSub = &AddeSubeOp1; |
| 10174 | else |
| 10175 | HiAddSub = &AddeSubeOp0; |
| 10176 | |
| 10177 | // Ensure that LoMul and LowAddSub are taken from correct ISD::SMUL_LOHI node |
| 10178 | // whose low result is fed to the ADDC/SUBC we are checking. |
| 10179 | |
| 10180 | if (AddcSubcOp0 == MULOp.getValue(0)) { |
| 10181 | LoMul = &AddcSubcOp0; |
| 10182 | LowAddSub = &AddcSubcOp1; |
| 10183 | } |
| 10184 | if (AddcSubcOp1 == MULOp.getValue(0)) { |
| 10185 | LoMul = &AddcSubcOp1; |
| 10186 | LowAddSub = &AddcSubcOp0; |
| 10187 | } |
| 10188 | |
| 10189 | if (!LoMul) |
| 10190 | return SDValue(); |
| 10191 | |
| 10192 | // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC |
| 10193 | // the replacement below will create a cycle. |
| 10194 | if (AddcSubcNode == HiAddSub->getNode() || |
| 10195 | AddcSubcNode->isPredecessorOf(HiAddSub->getNode())) |
| 10196 | return SDValue(); |
| 10197 | |
| 10198 | // Create the merged node. |
| 10199 | SelectionDAG &DAG = DCI.DAG; |
| 10200 | |
| 10201 | // Start building operand list. |
| 10202 | SmallVector<SDValue, 8> Ops; |
| 10203 | Ops.push_back(LoMul->getOperand(0)); |
| 10204 | Ops.push_back(LoMul->getOperand(1)); |
| 10205 | |
  // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to
  // be the case, we must be doing signed multiplication and only use the
  // higher part of the result of the MLAL; furthermore, the LowAddSub must
  // be a constant addition or subtraction with the value 0x80000000.
| 10210 | if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() && |
| 10211 | FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) && |
| 10212 | LowAddSub->getNode()->getOpcode() == ISD::Constant && |
| 10213 | static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() == |
| 10214 | 0x80000000) { |
| 10215 | Ops.push_back(*HiAddSub); |
| 10216 | if (AddcSubcNode->getOpcode() == ARMISD::SUBC) { |
| 10217 | FinalOpc = ARMISD::SMMLSR; |
| 10218 | } else { |
| 10219 | FinalOpc = ARMISD::SMMLAR; |
| 10220 | } |
| 10221 | SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops); |
| 10222 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode); |
| 10223 | |
| 10224 | return SDValue(AddeSubeNode, 0); |
| 10225 | } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC) |
| 10226 | // SMMLS is generated during instruction selection and the rest of this |
| 10227 | // function can not handle the case where AddcSubcNode is a SUBC. |
| 10228 | return SDValue(); |
| 10229 | |
| 10230 | // Finish building the operand list for {U/S}MLAL |
| 10231 | Ops.push_back(*LowAddSub); |
| 10232 | Ops.push_back(*HiAddSub); |
| 10233 | |
| 10234 | SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), |
| 10235 | DAG.getVTList(MVT::i32, MVT::i32), Ops); |
| 10236 | |
  // Replace the ADD nodes' uses with the MLAL node's values.
| 10238 | SDValue HiMLALResult(MLALNode.getNode(), 1); |
| 10239 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult); |
| 10240 | |
| 10241 | SDValue LoMLALResult(MLALNode.getNode(), 0); |
| 10242 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult); |
| 10243 | |
| 10244 | // Return original node to notify the driver to stop replacing. |
| 10245 | return SDValue(AddeSubeNode, 0); |
| 10246 | } |
| 10247 | |
| 10248 | static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, |
| 10249 | TargetLowering::DAGCombinerInfo &DCI, |
| 10250 | const ARMSubtarget *Subtarget) { |
| 10251 | // UMAAL is similar to UMLAL except that it adds two unsigned values. |
| 10252 | // While trying to combine for the other MLAL nodes, first search for the |
| 10253 | // chance to use UMAAL. Check if Addc uses a node which has already |
| 10254 | // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde |
| 10255 | // as the addend, and it's handled in PerformUMLALCombine. |
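  // I.e. (a sketch): with U = (UMLAL a, b, c, 0),
  //   (adde U:hi, 0, carry(addc U:lo, d)) => (UMAAL a, b, c, d)
  // since a*b + c + d can never overflow 64 bits.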
| 10256 | |
| 10257 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 10258 | return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget); |
| 10259 | |
| 10260 | // Check that we have a glued ADDC node. |
  SDNode *AddcNode = AddeNode->getOperand(2).getNode();
| 10262 | if (AddcNode->getOpcode() != ARMISD::ADDC) |
| 10263 | return SDValue(); |
| 10264 | |
| 10265 | // Find the converted UMAAL or quit if it doesn't exist. |
| 10266 | SDNode *UmlalNode = nullptr; |
| 10267 | SDValue AddHi; |
| 10268 | if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) { |
| 10269 | UmlalNode = AddcNode->getOperand(0).getNode(); |
| 10270 | AddHi = AddcNode->getOperand(1); |
| 10271 | } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) { |
| 10272 | UmlalNode = AddcNode->getOperand(1).getNode(); |
| 10273 | AddHi = AddcNode->getOperand(0); |
| 10274 | } else { |
| 10275 | return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget); |
| 10276 | } |
| 10277 | |
| 10278 | // The ADDC should be glued to an ADDE node, which uses the same UMLAL as |
| 10279 | // the ADDC as well as Zero. |
| 10280 | if (!isNullConstant(UmlalNode->getOperand(3))) |
| 10281 | return SDValue(); |
| 10282 | |
| 10283 | if ((isNullConstant(AddeNode->getOperand(0)) && |
| 10284 | AddeNode->getOperand(1).getNode() == UmlalNode) || |
| 10285 | (AddeNode->getOperand(0).getNode() == UmlalNode && |
| 10286 | isNullConstant(AddeNode->getOperand(1)))) { |
| 10287 | SelectionDAG &DAG = DCI.DAG; |
| 10288 | SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1), |
| 10289 | UmlalNode->getOperand(2), AddHi }; |
| 10290 | SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode), |
| 10291 | DAG.getVTList(MVT::i32, MVT::i32), Ops); |
| 10292 | |
    // Replace the ADD nodes' uses with the UMAAL node's values.
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0),
                                  SDValue(UMAAL.getNode(), 1));
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0),
                                  SDValue(UMAAL.getNode(), 0));
| 10296 | |
| 10297 | // Return original node to notify the driver to stop replacing. |
| 10298 | return SDValue(AddeNode, 0); |
| 10299 | } |
| 10300 | return SDValue(); |
| 10301 | } |
| 10302 | |
| 10303 | static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, |
| 10304 | const ARMSubtarget *Subtarget) { |
| 10305 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 10306 | return SDValue(); |
| 10307 | |
| 10308 | // Check that we have a pair of ADDC and ADDE as operands. |
| 10309 | // Both addends of the ADDE must be zero. |
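  // I.e. (a sketch):
  //   (UMLAL a, b, (addc c, d):0, (adde 0, 0, (addc c, d):1):0)
  //   => (UMAAL a, b, c, d)
  // since the ADDC/ADDE pair merely materializes the 64-bit sum c + d.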
  SDNode *AddcNode = N->getOperand(2).getNode();
  SDNode *AddeNode = N->getOperand(3).getNode();
| 10312 | if ((AddcNode->getOpcode() == ARMISD::ADDC) && |
| 10313 | (AddeNode->getOpcode() == ARMISD::ADDE) && |
| 10314 | isNullConstant(AddeNode->getOperand(0)) && |
| 10315 | isNullConstant(AddeNode->getOperand(1)) && |
| 10316 | (AddeNode->getOperand(2).getNode() == AddcNode)) |
| 10317 | return DAG.getNode(ARMISD::UMAAL, SDLoc(N), |
| 10318 | DAG.getVTList(MVT::i32, MVT::i32), |
| 10319 | {N->getOperand(0), N->getOperand(1), |
| 10320 | AddcNode->getOperand(0), AddcNode->getOperand(1)}); |
| 10321 | else |
| 10322 | return SDValue(); |
| 10323 | } |
| 10324 | |
| 10325 | static SDValue PerformAddcSubcCombine(SDNode *N, |
| 10326 | TargetLowering::DAGCombinerInfo &DCI, |
| 10327 | const ARMSubtarget *Subtarget) { |
| 10328 | SelectionDAG &DAG(DCI.DAG); |
| 10329 | |
| 10330 | if (N->getOpcode() == ARMISD::SUBC) { |
| 10331 | // (SUBC (ADDE 0, 0, C), 1) -> C |
| 10332 | SDValue LHS = N->getOperand(0); |
| 10333 | SDValue RHS = N->getOperand(1); |
| 10334 | if (LHS->getOpcode() == ARMISD::ADDE && |
| 10335 | isNullConstant(LHS->getOperand(0)) && |
| 10336 | isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) { |
| 10337 | return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2)); |
| 10338 | } |
| 10339 | } |
| 10340 | |
| 10341 | if (Subtarget->isThumb1Only()) { |
| 10342 | SDValue RHS = N->getOperand(1); |
| 10343 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { |
| 10344 | int32_t imm = C->getSExtValue(); |
| 10345 | if (imm < 0 && imm > std::numeric_limits<int>::min()) { |
| 10346 | SDLoc DL(N); |
| 10347 | RHS = DAG.getConstant(-imm, DL, MVT::i32); |
| 10348 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC |
| 10349 | : ARMISD::ADDC; |
| 10350 | return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS); |
| 10351 | } |
| 10352 | } |
| 10353 | } |
| 10354 | |
| 10355 | return SDValue(); |
| 10356 | } |
| 10357 | |
| 10358 | static SDValue PerformAddeSubeCombine(SDNode *N, |
| 10359 | TargetLowering::DAGCombinerInfo &DCI, |
| 10360 | const ARMSubtarget *Subtarget) { |
| 10361 | if (Subtarget->isThumb1Only()) { |
| 10362 | SelectionDAG &DAG = DCI.DAG; |
| 10363 | SDValue RHS = N->getOperand(1); |
| 10364 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { |
| 10365 | int64_t imm = C->getSExtValue(); |
| 10366 | if (imm < 0) { |
| 10367 | SDLoc DL(N); |
| 10368 | |
| 10369 | // The with-carry-in form matches bitwise not instead of the negation. |
| 10370 | // Effectively, the inverse interpretation of the carry flag already |
| 10371 | // accounts for part of the negation. |
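        // For example (a sketch): (ADDE x, -5, carry) becomes
        // (SUBE x, 4, carry), since ~(-5) == 4 and the borrow convention
        // supplies the remaining +1.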
| 10372 | RHS = DAG.getConstant(~imm, DL, MVT::i32); |
| 10373 | |
| 10374 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE |
| 10375 | : ARMISD::ADDE; |
| 10376 | return DAG.getNode(Opcode, DL, N->getVTList(), |
| 10377 | N->getOperand(0), RHS, N->getOperand(2)); |
| 10378 | } |
| 10379 | } |
| 10380 | } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) { |
| 10381 | return AddCombineTo64bitMLAL(N, DCI, Subtarget); |
| 10382 | } |
| 10383 | return SDValue(); |
| 10384 | } |
| 10385 | |
| 10386 | static SDValue PerformABSCombine(SDNode *N, |
| 10387 | TargetLowering::DAGCombinerInfo &DCI, |
| 10388 | const ARMSubtarget *Subtarget) { |
| 10389 | SDValue res; |
| 10390 | SelectionDAG &DAG = DCI.DAG; |
| 10391 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 10392 | |
| 10393 | if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0))) |
| 10394 | return SDValue(); |
| 10395 | |
| 10396 | if (!TLI.expandABS(N, res, DAG)) |
| 10397 | return SDValue(); |
| 10398 | |
| 10399 | return res; |
| 10400 | } |
| 10401 | |
/// PerformADDECombine - Target-specific dag combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::{S,U}MUL_LOHI to S/UMLAL, or
/// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL.
| 10405 | static SDValue PerformADDECombine(SDNode *N, |
| 10406 | TargetLowering::DAGCombinerInfo &DCI, |
| 10407 | const ARMSubtarget *Subtarget) { |
| 10408 | // Only ARM and Thumb2 support UMLAL/SMLAL. |
| 10409 | if (Subtarget->isThumb1Only()) |
| 10410 | return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 10411 | |
| 10412 | // Only perform the checks after legalize when the pattern is available. |
| 10413 | if (DCI.isBeforeLegalize()) return SDValue(); |
| 10414 | |
| 10415 | return AddCombineTo64bitUMAAL(N, DCI, Subtarget); |
| 10416 | } |
| 10417 | |
| 10418 | /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with |
| 10419 | /// operands N0 and N1. This is a helper for PerformADDCombine that is |
| 10420 | /// called with the default operands, and if that fails, with commuted |
| 10421 | /// operands. |
| 10422 | static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, |
| 10423 | TargetLowering::DAGCombinerInfo &DCI, |
| 10424 | const ARMSubtarget *Subtarget){ |
| 10425 | // Attempt to create vpadd for this add. |
| 10426 | if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget)) |
| 10427 | return Result; |
| 10428 | |
| 10429 | // Attempt to create vpaddl for this add. |
| 10430 | if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget)) |
| 10431 | return Result; |
| 10432 | if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI, |
| 10433 | Subtarget)) |
| 10434 | return Result; |
| 10435 | |
  // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
| 10437 | if (N0.getNode()->hasOneUse()) |
| 10438 | if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI)) |
| 10439 | return Result; |
| 10440 | return SDValue(); |
| 10441 | } |
| 10442 | |
| 10443 | bool |
| 10444 | ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N, |
| 10445 | CombineLevel Level) const { |
| 10446 | if (Level == BeforeLegalizeTypes) |
| 10447 | return true; |
| 10448 | |
| 10449 | if (N->getOpcode() != ISD::SHL) |
| 10450 | return true; |
| 10451 | |
| 10452 | if (Subtarget->isThumb1Only()) { |
| 10453 | // Avoid making expensive immediates by commuting shifts. (This logic |
| 10454 | // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted |
| 10455 | // for free.) |
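    // For example (a sketch): commuting (shl (add x, 200), 4) would yield
    // (add (shl x, 4), 3200), and 3200 cannot be encoded as a Thumb1
    // immediate while 200 can.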
| 10456 | if (N->getOpcode() != ISD::SHL) |
| 10457 | return true; |
| 10458 | SDValue N1 = N->getOperand(0); |
| 10459 | if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND && |
| 10460 | N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR) |
| 10461 | return true; |
| 10462 | if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) { |
| 10463 | if (Const->getAPIntValue().ult(256)) |
| 10464 | return false; |
| 10465 | if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) && |
| 10466 | Const->getAPIntValue().sgt(-256)) |
| 10467 | return false; |
| 10468 | } |
| 10469 | return true; |
| 10470 | } |
| 10471 | |
| 10472 | // Turn off commute-with-shift transform after legalization, so it doesn't |
| 10473 | // conflict with PerformSHLSimplify. (We could try to detect when |
| 10474 | // PerformSHLSimplify would trigger more precisely, but it isn't |
| 10475 | // really necessary.) |
| 10476 | return false; |
| 10477 | } |
| 10478 | |
| 10479 | bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( |
| 10480 | const SDNode *N, CombineLevel Level) const { |
| 10481 | if (!Subtarget->isThumb1Only()) |
| 10482 | return true; |
| 10483 | |
| 10484 | if (Level == BeforeLegalizeTypes) |
| 10485 | return true; |
| 10486 | |
| 10487 | return false; |
| 10488 | } |
| 10489 | |
| 10490 | static SDValue PerformSHLSimplify(SDNode *N, |
| 10491 | TargetLowering::DAGCombinerInfo &DCI, |
| 10492 | const ARMSubtarget *ST) { |
| 10493 | // Allow the generic combiner to identify potential bswaps. |
| 10494 | if (DCI.isBeforeLegalize()) |
| 10495 | return SDValue(); |
| 10496 | |
| 10497 | // DAG combiner will fold: |
| 10498 | // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) |
  // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  // Other code patterns that can also be modified have the following form:
| 10501 | // b + ((a << 1) | 510) |
| 10502 | // b + ((a << 1) & 510) |
| 10503 | // b + ((a << 1) ^ 510) |
| 10504 | // b + ((a << 1) + 510) |
| 10505 | |
  // Many instructions can perform the shift for free, but it requires both
  // operands to be registers. If c1 << c2 is too large, a mov immediate
  // instruction will be needed. So, unfold back to the original pattern if:
  // - c1 and c2 are small enough that they don't require mov imms.
| 10510 | // - the user(s) of the node can perform an shl |
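  // For example (a sketch): (add b, (or (shl a, 1), 510)) is unfolded to
  // (add b, (shl (or a, 255), 1)); 255 is a legal immediate and the outer
  // add can fold the shl into a shifted-register operand.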
| 10511 | |
| 10512 | // No shifted operands for 16-bit instructions. |
| 10513 | if (ST->isThumb() && ST->isThumb1Only()) |
| 10514 | return SDValue(); |
| 10515 | |
| 10516 | // Check that all the users could perform the shl themselves. |
| 10517 | for (auto U : N->uses()) { |
| 10518 | switch(U->getOpcode()) { |
| 10519 | default: |
| 10520 | return SDValue(); |
| 10521 | case ISD::SUB: |
| 10522 | case ISD::ADD: |
| 10523 | case ISD::AND: |
| 10524 | case ISD::OR: |
| 10525 | case ISD::XOR: |
| 10526 | case ISD::SETCC: |
| 10527 | case ARMISD::CMP: |
| 10528 | // Check that the user isn't already using a constant because there |
| 10529 | // aren't any instructions that support an immediate operand and a |
| 10530 | // shifted operand. |
| 10531 | if (isa<ConstantSDNode>(U->getOperand(0)) || |
| 10532 | isa<ConstantSDNode>(U->getOperand(1))) |
| 10533 | return SDValue(); |
| 10534 | |
| 10535 | // Check that it's not already using a shift. |
| 10536 | if (U->getOperand(0).getOpcode() == ISD::SHL || |
| 10537 | U->getOperand(1).getOpcode() == ISD::SHL) |
| 10538 | return SDValue(); |
| 10539 | break; |
| 10540 | } |
| 10541 | } |
| 10542 | |
| 10543 | if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR && |
| 10544 | N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND) |
| 10545 | return SDValue(); |
| 10546 | |
| 10547 | if (N->getOperand(0).getOpcode() != ISD::SHL) |
| 10548 | return SDValue(); |
| 10549 | |
| 10550 | SDValue SHL = N->getOperand(0); |
| 10551 | |
| 10552 | auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 10553 | auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1)); |
| 10554 | if (!C1ShlC2 || !C2) |
| 10555 | return SDValue(); |
| 10556 | |
| 10557 | APInt C2Int = C2->getAPIntValue(); |
| 10558 | APInt C1Int = C1ShlC2->getAPIntValue(); |
| 10559 | |
| 10560 | // Check that performing a lshr will not lose any information. |
| 10561 | APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(), |
| 10562 | C2Int.getBitWidth() - C2->getZExtValue()); |
| 10563 | if ((C1Int & Mask) != C1Int) |
| 10564 | return SDValue(); |
| 10565 | |
| 10566 | // Shift the first constant. |
| 10567 | C1Int.lshrInPlace(C2Int); |
| 10568 | |
| 10569 | // The immediates are encoded as an 8-bit value that can be rotated. |
| 10570 | auto LargeImm = [](const APInt &Imm) { |
| 10571 | unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros(); |
| 10572 | return Imm.getBitWidth() - Zeros > 8; |
| 10573 | }; |
| 10574 | |
| 10575 | if (LargeImm(C1Int) || LargeImm(C2Int)) |
| 10576 | return SDValue(); |
| 10577 | |
| 10578 | SelectionDAG &DAG = DCI.DAG; |
| 10579 | SDLoc dl(N); |
| 10580 | SDValue X = SHL.getOperand(0); |
| 10581 | SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X, |
| 10582 | DAG.getConstant(C1Int, dl, MVT::i32)); |
| 10583 | // Shift left to compensate for the lshr of C1Int. |
| 10584 | SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1)); |
| 10585 | |
| 10586 | LLVM_DEBUG(dbgs() << "Simplify shl use:\n" ; SHL.getOperand(0).dump(); |
| 10587 | SHL.dump(); N->dump()); |
| 10588 | LLVM_DEBUG(dbgs() << "Into:\n" ; X.dump(); BinOp.dump(); Res.dump()); |
| 10589 | return Res; |
| 10590 | } |
| 10591 | |
| 10592 | |
| 10593 | /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. |
| 10594 | /// |
| 10595 | static SDValue PerformADDCombine(SDNode *N, |
| 10596 | TargetLowering::DAGCombinerInfo &DCI, |
| 10597 | const ARMSubtarget *Subtarget) { |
| 10598 | SDValue N0 = N->getOperand(0); |
| 10599 | SDValue N1 = N->getOperand(1); |
| 10600 | |
| 10601 | // Only works one way, because it needs an immediate operand. |
| 10602 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
| 10603 | return Result; |
| 10604 | |
| 10605 | // First try with the default operand order. |
| 10606 | if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) |
| 10607 | return Result; |
| 10608 | |
| 10609 | // If that didn't work, try again with the operands commuted. |
| 10610 | return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); |
| 10611 | } |
| 10612 | |
| 10613 | /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. |
| 10614 | /// |
| 10615 | static SDValue PerformSUBCombine(SDNode *N, |
| 10616 | TargetLowering::DAGCombinerInfo &DCI) { |
| 10617 | SDValue N0 = N->getOperand(0); |
| 10618 | SDValue N1 = N->getOperand(1); |
| 10619 | |
  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
| 10621 | if (N1.getNode()->hasOneUse()) |
| 10622 | if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI)) |
| 10623 | return Result; |
| 10624 | |
| 10625 | return SDValue(); |
| 10626 | } |
| 10627 | |
| 10628 | /// PerformVMULCombine |
| 10629 | /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the |
| 10630 | /// special multiplier accumulator forwarding. |
| 10631 | /// vmul d3, d0, d2 |
| 10632 | /// vmla d3, d1, d2 |
| 10633 | /// is faster than |
| 10634 | /// vadd d3, d0, d1 |
| 10635 | /// vmul d3, d3, d2 |
| 10636 | // However, for (A + B) * (A + B), |
| 10637 | // vadd d2, d0, d1 |
| 10638 | // vmul d3, d0, d2 |
| 10639 | // vmla d3, d1, d2 |
| 10640 | // is slower than |
| 10641 | // vadd d2, d0, d1 |
| 10642 | // vmul d3, d2, d2 |
| 10643 | static SDValue PerformVMULCombine(SDNode *N, |
| 10644 | TargetLowering::DAGCombinerInfo &DCI, |
| 10645 | const ARMSubtarget *Subtarget) { |
| 10646 | if (!Subtarget->hasVMLxForwarding()) |
| 10647 | return SDValue(); |
| 10648 | |
| 10649 | SelectionDAG &DAG = DCI.DAG; |
| 10650 | SDValue N0 = N->getOperand(0); |
| 10651 | SDValue N1 = N->getOperand(1); |
| 10652 | unsigned Opcode = N0.getOpcode(); |
| 10653 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 10654 | Opcode != ISD::FADD && Opcode != ISD::FSUB) { |
| 10655 | Opcode = N1.getOpcode(); |
| 10656 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 10657 | Opcode != ISD::FADD && Opcode != ISD::FSUB) |
| 10658 | return SDValue(); |
| 10659 | std::swap(N0, N1); |
| 10660 | } |
| 10661 | |
| 10662 | if (N0 == N1) |
| 10663 | return SDValue(); |
| 10664 | |
| 10665 | EVT VT = N->getValueType(0); |
| 10666 | SDLoc DL(N); |
| 10667 | SDValue N00 = N0->getOperand(0); |
| 10668 | SDValue N01 = N0->getOperand(1); |
| 10669 | return DAG.getNode(Opcode, DL, VT, |
| 10670 | DAG.getNode(ISD::MUL, DL, VT, N00, N1), |
| 10671 | DAG.getNode(ISD::MUL, DL, VT, N01, N1)); |
| 10672 | } |
| 10673 | |
| 10674 | static SDValue PerformMULCombine(SDNode *N, |
| 10675 | TargetLowering::DAGCombinerInfo &DCI, |
| 10676 | const ARMSubtarget *Subtarget) { |
| 10677 | SelectionDAG &DAG = DCI.DAG; |
| 10678 | |
| 10679 | if (Subtarget->isThumb1Only()) |
| 10680 | return SDValue(); |
| 10681 | |
| 10682 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 10683 | return SDValue(); |
| 10684 | |
| 10685 | EVT VT = N->getValueType(0); |
| 10686 | if (VT.is64BitVector() || VT.is128BitVector()) |
| 10687 | return PerformVMULCombine(N, DCI, Subtarget); |
| 10688 | if (VT != MVT::i32) |
| 10689 | return SDValue(); |
| 10690 | |
| 10691 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 10692 | if (!C) |
| 10693 | return SDValue(); |
| 10694 | |
| 10695 | int64_t MulAmt = C->getSExtValue(); |
| 10696 | unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt); |
| 10697 | |
| 10698 | ShiftAmt = ShiftAmt & (32 - 1); |
| 10699 | SDValue V = N->getOperand(0); |
| 10700 | SDLoc DL(N); |
| 10701 | |
| 10702 | SDValue Res; |
| 10703 | MulAmt >>= ShiftAmt; |
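  // For example (a sketch): MulAmt == 20 gives ShiftAmt == 2 and a residual
  // multiplier of 5 == 2^2 + 1, producing (shl (add (shl x, 2), x), 2).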
| 10704 | |
| 10705 | if (MulAmt >= 0) { |
| 10706 | if (isPowerOf2_32(MulAmt - 1)) { |
| 10707 | // (mul x, 2^N + 1) => (add (shl x, N), x) |
| 10708 | Res = DAG.getNode(ISD::ADD, DL, VT, |
| 10709 | V, |
| 10710 | DAG.getNode(ISD::SHL, DL, VT, |
| 10711 | V, |
| 10712 | DAG.getConstant(Log2_32(MulAmt - 1), DL, |
| 10713 | MVT::i32))); |
| 10714 | } else if (isPowerOf2_32(MulAmt + 1)) { |
| 10715 | // (mul x, 2^N - 1) => (sub (shl x, N), x) |
| 10716 | Res = DAG.getNode(ISD::SUB, DL, VT, |
| 10717 | DAG.getNode(ISD::SHL, DL, VT, |
| 10718 | V, |
| 10719 | DAG.getConstant(Log2_32(MulAmt + 1), DL, |
| 10720 | MVT::i32)), |
| 10721 | V); |
| 10722 | } else |
| 10723 | return SDValue(); |
| 10724 | } else { |
| 10725 | uint64_t MulAmtAbs = -MulAmt; |
| 10726 | if (isPowerOf2_32(MulAmtAbs + 1)) { |
| 10727 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) |
| 10728 | Res = DAG.getNode(ISD::SUB, DL, VT, |
| 10729 | V, |
| 10730 | DAG.getNode(ISD::SHL, DL, VT, |
| 10731 | V, |
| 10732 | DAG.getConstant(Log2_32(MulAmtAbs + 1), DL, |
| 10733 | MVT::i32))); |
| 10734 | } else if (isPowerOf2_32(MulAmtAbs - 1)) { |
| 10735 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) |
| 10736 | Res = DAG.getNode(ISD::ADD, DL, VT, |
| 10737 | V, |
| 10738 | DAG.getNode(ISD::SHL, DL, VT, |
| 10739 | V, |
| 10740 | DAG.getConstant(Log2_32(MulAmtAbs - 1), DL, |
| 10741 | MVT::i32))); |
| 10742 | Res = DAG.getNode(ISD::SUB, DL, VT, |
| 10743 | DAG.getConstant(0, DL, MVT::i32), Res); |
| 10744 | } else |
| 10745 | return SDValue(); |
| 10746 | } |
| 10747 | |
| 10748 | if (ShiftAmt != 0) |
| 10749 | Res = DAG.getNode(ISD::SHL, DL, VT, |
| 10750 | Res, DAG.getConstant(ShiftAmt, DL, MVT::i32)); |
| 10751 | |
| 10752 | // Do not add new nodes to DAG combiner worklist. |
| 10753 | DCI.CombineTo(N, Res, false); |
| 10754 | return SDValue(); |
| 10755 | } |
| 10756 | |
| 10757 | static SDValue CombineANDShift(SDNode *N, |
| 10758 | TargetLowering::DAGCombinerInfo &DCI, |
| 10759 | const ARMSubtarget *Subtarget) { |
| 10760 | // Allow DAGCombine to pattern-match before we touch the canonical form. |
| 10761 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 10762 | return SDValue(); |
| 10763 | |
| 10764 | if (N->getValueType(0) != MVT::i32) |
| 10765 | return SDValue(); |
| 10766 | |
| 10767 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 10768 | if (!N1C) |
| 10769 | return SDValue(); |
| 10770 | |
| 10771 | uint32_t C1 = (uint32_t)N1C->getZExtValue(); |
| 10772 | // Don't transform uxtb/uxth. |
| 10773 | if (C1 == 255 || C1 == 65535) |
| 10774 | return SDValue(); |
| 10775 | |
| 10776 | SDNode *N0 = N->getOperand(0).getNode(); |
| 10777 | if (!N0->hasOneUse()) |
| 10778 | return SDValue(); |
| 10779 | |
| 10780 | if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL) |
| 10781 | return SDValue(); |
| 10782 | |
| 10783 | bool LeftShift = N0->getOpcode() == ISD::SHL; |
| 10784 | |
| 10785 | ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1)); |
| 10786 | if (!N01C) |
| 10787 | return SDValue(); |
| 10788 | |
| 10789 | uint32_t C2 = (uint32_t)N01C->getZExtValue(); |
| 10790 | if (!C2 || C2 >= 32) |
| 10791 | return SDValue(); |
| 10792 | |
| 10793 | // Clear irrelevant bits in the mask. |
| 10794 | if (LeftShift) |
| 10795 | C1 &= (-1U << C2); |
| 10796 | else |
| 10797 | C1 &= (-1U >> C2); |
| 10798 | |
| 10799 | SelectionDAG &DAG = DCI.DAG; |
| 10800 | SDLoc DL(N); |
| 10801 | |
| 10802 | // We have a pattern of the form "(and (shl x, c2) c1)" or |
| 10803 | // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to |
| 10804 | // transform to a pair of shifts, to save materializing c1. |
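  // For example (a sketch of the right-shift case): (and (srl x, 4),
  // 0xffffff) has C2 == 4 and C3 == 8, so it becomes (srl (shl x, 4), 8)
  // and the 0xffffff constant never needs to be materialized.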
| 10805 | |
| 10806 | // First pattern: right shift, then mask off leading bits. |
| 10807 | // FIXME: Use demanded bits? |
| 10808 | if (!LeftShift && isMask_32(C1)) { |
| 10809 | uint32_t C3 = countLeadingZeros(C1); |
| 10810 | if (C2 < C3) { |
| 10811 | SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), |
| 10812 | DAG.getConstant(C3 - C2, DL, MVT::i32)); |
| 10813 | return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL, |
| 10814 | DAG.getConstant(C3, DL, MVT::i32)); |
| 10815 | } |
| 10816 | } |
| 10817 | |
| 10818 | // First pattern, reversed: left shift, then mask off trailing bits. |
| 10819 | if (LeftShift && isMask_32(~C1)) { |
| 10820 | uint32_t C3 = countTrailingZeros(C1); |
| 10821 | if (C2 < C3) { |
| 10822 | SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0), |
| 10823 | DAG.getConstant(C3 - C2, DL, MVT::i32)); |
| 10824 | return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL, |
| 10825 | DAG.getConstant(C3, DL, MVT::i32)); |
| 10826 | } |
| 10827 | } |
| 10828 | |
| 10829 | // Second pattern: left shift, then mask off leading bits. |
| 10830 | // FIXME: Use demanded bits? |
| 10831 | if (LeftShift && isShiftedMask_32(C1)) { |
| 10832 | uint32_t Trailing = countTrailingZeros(C1); |
| 10833 | uint32_t C3 = countLeadingZeros(C1); |
| 10834 | if (Trailing == C2 && C2 + C3 < 32) { |
| 10835 | SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), |
| 10836 | DAG.getConstant(C2 + C3, DL, MVT::i32)); |
| 10837 | return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL, |
| 10838 | DAG.getConstant(C3, DL, MVT::i32)); |
| 10839 | } |
| 10840 | } |
| 10841 | |
| 10842 | // Second pattern, reversed: right shift, then mask off trailing bits. |
| 10843 | // FIXME: Handle other patterns of known/demanded bits. |
| 10844 | if (!LeftShift && isShiftedMask_32(C1)) { |
| 10845 | uint32_t Leading = countLeadingZeros(C1); |
| 10846 | uint32_t C3 = countTrailingZeros(C1); |
| 10847 | if (Leading == C2 && C2 + C3 < 32) { |
| 10848 | SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0), |
| 10849 | DAG.getConstant(C2 + C3, DL, MVT::i32)); |
| 10850 | return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL, |
| 10851 | DAG.getConstant(C3, DL, MVT::i32)); |
| 10852 | } |
| 10853 | } |
| 10854 | |
| 10855 | // FIXME: Transform "(and (shl x, c2) c1)" -> |
| 10856 | // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than |
| 10857 | // c1. |
| 10858 | return SDValue(); |
| 10859 | } |
| 10860 | |
| 10861 | static SDValue PerformANDCombine(SDNode *N, |
| 10862 | TargetLowering::DAGCombinerInfo &DCI, |
| 10863 | const ARMSubtarget *Subtarget) { |
| 10864 | // Attempt to use immediate-form VBIC |
| 10865 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); |
| 10866 | SDLoc dl(N); |
| 10867 | EVT VT = N->getValueType(0); |
| 10868 | SelectionDAG &DAG = DCI.DAG; |
| 10869 | |
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
| 10871 | return SDValue(); |
| 10872 | |
| 10873 | APInt SplatBits, SplatUndef; |
| 10874 | unsigned SplatBitSize; |
| 10875 | bool HasAnyUndefs; |
| 10876 | if (BVN && |
| 10877 | BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 10878 | if (SplatBitSize <= 64) { |
| 10879 | EVT VbicVT; |
| 10880 | SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), |
| 10881 | SplatUndef.getZExtValue(), SplatBitSize, |
| 10882 | DAG, dl, VbicVT, VT.is128BitVector(), |
| 10883 | OtherModImm); |
| 10884 | if (Val.getNode()) { |
| 10885 | SDValue Input = |
| 10886 | DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); |
| 10887 | SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); |
| 10888 | return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); |
| 10889 | } |
| 10890 | } |
| 10891 | } |
| 10892 | |
| 10893 | if (!Subtarget->isThumb1Only()) { |
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and x, c))
| 10895 | if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI)) |
| 10896 | return Result; |
| 10897 | |
| 10898 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
| 10899 | return Result; |
| 10900 | } |
| 10901 | |
| 10902 | if (Subtarget->isThumb1Only()) |
| 10903 | if (SDValue Result = CombineANDShift(N, DCI, Subtarget)) |
| 10904 | return Result; |
| 10905 | |
| 10906 | return SDValue(); |
| 10907 | } |
| 10908 | |
| 10909 | // Try combining OR nodes to SMULWB, SMULWT. |
| 10910 | static SDValue PerformORCombineToSMULWBT(SDNode *OR, |
| 10911 | TargetLowering::DAGCombinerInfo &DCI, |
| 10912 | const ARMSubtarget *Subtarget) { |
| 10913 | if (!Subtarget->hasV6Ops() || |
| 10914 | (Subtarget->isThumb() && |
| 10915 | (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) |
| 10916 | return SDValue(); |
| 10917 | |
| 10918 | SDValue SRL = OR->getOperand(0); |
| 10919 | SDValue SHL = OR->getOperand(1); |
| 10920 | |
| 10921 | if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { |
| 10922 | SRL = OR->getOperand(1); |
| 10923 | SHL = OR->getOperand(0); |
| 10924 | } |
| 10925 | if (!isSRL16(SRL) || !isSHL16(SHL)) |
| 10926 | return SDValue(); |
| 10927 | |
| 10928 | // The first operands to the shifts need to be the two results from the |
| 10929 | // same smul_lohi node. |
| 10930 | if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) || |
| 10931 | SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI) |
| 10932 | return SDValue(); |
| 10933 | |
| 10934 | SDNode *SMULLOHI = SRL.getOperand(0).getNode(); |
| 10935 | if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) || |
| 10936 | SHL.getOperand(0) != SDValue(SMULLOHI, 1)) |
| 10937 | return SDValue(); |
| 10938 | |
| 10939 | // Now we have: |
  // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))
  // For SMULW[B|T], smul_lohi takes a 32-bit and a 16-bit argument.
  // For SMULWB the 16-bit value must be sign-extended somehow.
| 10943 | // For SMULWT only the SRA is required. |
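  // Either way, the OR reconstructs bits [47:16] of the 48-bit product,
  // which is precisely the 32-bit result that SMULW[B|T] produces.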
| 10944 | // Check both sides of SMUL_LOHI |
| 10945 | SDValue OpS16 = SMULLOHI->getOperand(0); |
| 10946 | SDValue OpS32 = SMULLOHI->getOperand(1); |
| 10947 | |
| 10948 | SelectionDAG &DAG = DCI.DAG; |
| 10949 | if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) { |
| 10950 | OpS16 = OpS32; |
| 10951 | OpS32 = SMULLOHI->getOperand(0); |
| 10952 | } |
| 10953 | |
| 10954 | SDLoc dl(OR); |
| 10955 | unsigned Opcode = 0; |
| 10956 | if (isS16(OpS16, DAG)) |
| 10957 | Opcode = ARMISD::SMULWB; |
| 10958 | else if (isSRA16(OpS16)) { |
| 10959 | Opcode = ARMISD::SMULWT; |
| 10960 | OpS16 = OpS16->getOperand(0); |
| 10961 | } |
| 10962 | else |
| 10963 | return SDValue(); |
| 10964 | |
| 10965 | SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16); |
| 10966 | DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res); |
| 10967 | return SDValue(OR, 0); |
| 10968 | } |
| 10969 | |
| 10970 | static SDValue PerformORCombineToBFI(SDNode *N, |
| 10971 | TargetLowering::DAGCombinerInfo &DCI, |
| 10972 | const ARMSubtarget *Subtarget) { |
| 10973 | // BFI is only available on V6T2+ |
| 10974 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) |
| 10975 | return SDValue(); |
| 10976 | |
| 10977 | EVT VT = N->getValueType(0); |
| 10978 | SDValue N0 = N->getOperand(0); |
| 10979 | SDValue N1 = N->getOperand(1); |
| 10980 | SelectionDAG &DAG = DCI.DAG; |
| 10981 | SDLoc DL(N); |
| 10982 | // 1) or (and A, mask), val => ARMbfi A, val, mask |
| 10983 | // iff (val & mask) == val |
| 10984 | // |
| 10985 | // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 10986 | // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) |
| 10987 | // && mask == ~mask2 |
| 10988 | // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) |
| 10989 | // && ~mask == mask2 |
| 10990 | // (i.e., copy a bitfield value into another bitfield of the same width) |
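  // For example (a sketch of case 1):
  //   (or (and A, 0xffff00ff), 0x2300)
  //   => (ARMbfi A, 0x23, 0xffff00ff), inserting 0x23 into bits 8-15 of A.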
| 10991 | |
| 10992 | if (VT != MVT::i32) |
| 10993 | return SDValue(); |
| 10994 | |
| 10995 | SDValue N00 = N0.getOperand(0); |
| 10996 | |
| 10997 | // The value and the mask need to be constants so we can verify this is |
| 10998 | // actually a bitfield set. If the mask is 0xffff, we can do better |
| 10999 | // via a movt instruction, so don't use BFI in that case. |
| 11000 | SDValue MaskOp = N0.getOperand(1); |
| 11001 | ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); |
| 11002 | if (!MaskC) |
| 11003 | return SDValue(); |
| 11004 | unsigned Mask = MaskC->getZExtValue(); |
| 11005 | if (Mask == 0xffff) |
| 11006 | return SDValue(); |
| 11007 | SDValue Res; |
| 11008 | // Case (1): or (and A, mask), val => ARMbfi A, val, mask |
| 11009 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); |
| 11010 | if (N1C) { |
| 11011 | unsigned Val = N1C->getZExtValue(); |
| 11012 | if ((Val & ~Mask) != Val) |
| 11013 | return SDValue(); |
| 11014 | |
| 11015 | if (ARM::isBitFieldInvertedMask(Mask)) { |
| 11016 | Val >>= countTrailingZeros(~Mask); |
| 11017 | |
| 11018 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, |
| 11019 | DAG.getConstant(Val, DL, MVT::i32), |
| 11020 | DAG.getConstant(Mask, DL, MVT::i32)); |
| 11021 | |
| 11022 | DCI.CombineTo(N, Res, false); |
      // Return value from the original node to inform the combiner that N is
      // now dead.
| 11025 | return SDValue(N, 0); |
| 11026 | } |
| 11027 | } else if (N1.getOpcode() == ISD::AND) { |
| 11028 | // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 11029 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); |
| 11030 | if (!N11C) |
| 11031 | return SDValue(); |
| 11032 | unsigned Mask2 = N11C->getZExtValue(); |
| 11033 | |
    // Mask and ~Mask2 (or the reverse) must be equivalent for the BFI
    // pattern to match as-is.
| 11036 | if (ARM::isBitFieldInvertedMask(Mask) && |
| 11037 | (Mask == ~Mask2)) { |
| 11038 | // The pack halfword instruction works better for masks that fit it, |
| 11039 | // so use that when it's available. |
| 11040 | if (Subtarget->hasDSP() && |
| 11041 | (Mask == 0xffff || Mask == 0xffff0000)) |
| 11042 | return SDValue(); |
| 11043 | // 2a |
| 11044 | unsigned amt = countTrailingZeros(Mask2); |
| 11045 | Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), |
| 11046 | DAG.getConstant(amt, DL, MVT::i32)); |
| 11047 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, |
| 11048 | DAG.getConstant(Mask, DL, MVT::i32)); |
| 11049 | DCI.CombineTo(N, Res, false); |
      // Return value from the original node to inform the combiner that N is
      // now dead.
| 11052 | return SDValue(N, 0); |
| 11053 | } else if (ARM::isBitFieldInvertedMask(~Mask) && |
| 11054 | (~Mask == Mask2)) { |
| 11055 | // The pack halfword instruction works better for masks that fit it, |
| 11056 | // so use that when it's available. |
| 11057 | if (Subtarget->hasDSP() && |
| 11058 | (Mask2 == 0xffff || Mask2 == 0xffff0000)) |
| 11059 | return SDValue(); |
| 11060 | // 2b |
| 11061 | unsigned lsb = countTrailingZeros(Mask); |
| 11062 | Res = DAG.getNode(ISD::SRL, DL, VT, N00, |
| 11063 | DAG.getConstant(lsb, DL, MVT::i32)); |
| 11064 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, |
| 11065 | DAG.getConstant(Mask2, DL, MVT::i32)); |
| 11066 | DCI.CombineTo(N, Res, false); |
      // Return value from the original node to inform the combiner that N is
      // now dead.
| 11069 | return SDValue(N, 0); |
| 11070 | } |
| 11071 | } |
| 11072 | |
| 11073 | if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && |
| 11074 | N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && |
| 11075 | ARM::isBitFieldInvertedMask(~Mask)) { |
| 11076 | // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask |
| 11077 | // where lsb(mask) == #shamt and masked bits of B are known zero. |
| 11078 | SDValue ShAmt = N00.getOperand(1); |
| 11079 | unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); |
| 11080 | unsigned LSB = countTrailingZeros(Mask); |
| 11081 | if (ShAmtC != LSB) |
| 11082 | return SDValue(); |
| 11083 | |
| 11084 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), |
| 11085 | DAG.getConstant(~Mask, DL, MVT::i32)); |
| 11086 | |
| 11087 | DCI.CombineTo(N, Res, false); |
    // Return value from the original node to inform the combiner that N is
    // now dead.
| 11090 | return SDValue(N, 0); |
| 11091 | } |
| 11092 | |
| 11093 | return SDValue(); |
| 11094 | } |
| 11095 | |
| 11096 | /// PerformORCombine - Target-specific dag combine xforms for ISD::OR |
| 11097 | static SDValue PerformORCombine(SDNode *N, |
| 11098 | TargetLowering::DAGCombinerInfo &DCI, |
| 11099 | const ARMSubtarget *Subtarget) { |
| 11100 | // Attempt to use immediate-form VORR |
| 11101 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); |
| 11102 | SDLoc dl(N); |
| 11103 | EVT VT = N->getValueType(0); |
| 11104 | SelectionDAG &DAG = DCI.DAG; |
| 11105 | |
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
| 11107 | return SDValue(); |
| 11108 | |
| 11109 | APInt SplatBits, SplatUndef; |
| 11110 | unsigned SplatBitSize; |
| 11111 | bool HasAnyUndefs; |
| 11112 | if (BVN && Subtarget->hasNEON() && |
| 11113 | BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 11114 | if (SplatBitSize <= 64) { |
| 11115 | EVT VorrVT; |
| 11116 | SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), |
| 11117 | SplatUndef.getZExtValue(), SplatBitSize, |
| 11118 | DAG, dl, VorrVT, VT.is128BitVector(), |
| 11119 | OtherModImm); |
| 11120 | if (Val.getNode()) { |
| 11121 | SDValue Input = |
| 11122 | DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); |
| 11123 | SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); |
| 11124 | return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); |
| 11125 | } |
| 11126 | } |
| 11127 | } |
| 11128 | |
| 11129 | if (!Subtarget->isThumb1Only()) { |
    // fold (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
| 11131 | if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) |
| 11132 | return Result; |
| 11133 | if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget)) |
| 11134 | return Result; |
| 11135 | } |
| 11136 | |
| 11137 | SDValue N0 = N->getOperand(0); |
| 11138 | SDValue N1 = N->getOperand(1); |
| 11139 | |
| 11140 | // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. |
| 11141 | if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && |
| 11142 | DAG.getTargetLoweringInfo().isTypeLegal(VT)) { |
| 11143 | |
| 11144 | // The code below optimizes (or (and X, Y), Z). |
| 11145 | // The AND operand needs to have a single user to make these optimizations |
| 11146 | // profitable. |
| 11147 | if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) |
| 11148 | return SDValue(); |
| 11149 | |
| 11150 | APInt SplatUndef; |
| 11151 | unsigned SplatBitSize; |
| 11152 | bool HasAnyUndefs; |
| 11153 | |
| 11154 | APInt SplatBits0, SplatBits1; |
| 11155 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); |
| 11156 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); |
    // Ensure that the second operand of both ANDs is a constant splat.
| 11158 | if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, |
| 11159 | HasAnyUndefs) && !HasAnyUndefs) { |
| 11160 | if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, |
| 11161 | HasAnyUndefs) && !HasAnyUndefs) { |
| 11162 | // Ensure that the bit width of the constants are the same and that |
| 11163 | // the splat arguments are logical inverses as per the pattern we |
| 11164 | // are trying to simplify. |
| 11165 | if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && |
| 11166 | SplatBits0 == ~SplatBits1) { |
| 11167 | // Canonicalize the vector type to make instruction selection |
| 11168 | // simpler. |
| 11169 | EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 11170 | SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, |
| 11171 | N0->getOperand(1), |
| 11172 | N0->getOperand(0), |
| 11173 | N1->getOperand(0)); |
| 11174 | return DAG.getNode(ISD::BITCAST, dl, VT, Result); |
| 11175 | } |
| 11176 | } |
| 11177 | } |
| 11178 | } |
| 11179 | |
| 11180 | // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when |
| 11181 | // reasonable. |
| 11182 | if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { |
| 11183 | if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget)) |
| 11184 | return Res; |
| 11185 | } |
| 11186 | |
| 11187 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
| 11188 | return Result; |
| 11189 | |
| 11190 | return SDValue(); |
| 11191 | } |
| 11192 | |
| 11193 | static SDValue PerformXORCombine(SDNode *N, |
| 11194 | TargetLowering::DAGCombinerInfo &DCI, |
| 11195 | const ARMSubtarget *Subtarget) { |
| 11196 | EVT VT = N->getValueType(0); |
| 11197 | SelectionDAG &DAG = DCI.DAG; |
| 11198 | |
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
| 11200 | return SDValue(); |
| 11201 | |
| 11202 | if (!Subtarget->isThumb1Only()) { |
    // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
| 11204 | if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) |
| 11205 | return Result; |
| 11206 | |
| 11207 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
| 11208 | return Result; |
| 11209 | } |
| 11210 | |
| 11211 | return SDValue(); |
| 11212 | } |
| 11213 | |
// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn)
// and return it, and fill in FromMask and ToMask with the (consecutive)
// bits in "from" to be extracted and their position in "to" (Rd).
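// For example (a sketch): given (BFI d, n, 0xff00ffff), ToMask is
// 0x00ff0000 and FromMask is 0x000000ff: bits 0-7 of "n" are inserted
// into bits 16-23 of "d".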
| 11217 | static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { |
| 11218 | assert(N->getOpcode() == ARMISD::BFI); |
| 11219 | |
| 11220 | SDValue From = N->getOperand(1); |
| 11221 | ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue(); |
  FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(),
                                  ToMask.countPopulation());
| 11223 | |
  // If the "from" value came from a SRL #C, the extracted bits really come
  // from bit #C upwards in the base of the SRL, so shift FromMask up to
  // match.
| 11226 | if (From->getOpcode() == ISD::SRL && |
| 11227 | isa<ConstantSDNode>(From->getOperand(1))) { |
| 11228 | APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue(); |
    assert(Shift.getLimitedValue() < 32 && "Shift too large!");
| 11230 | FromMask <<= Shift.getLimitedValue(31); |
| 11231 | From = From->getOperand(0); |
| 11232 | } |
| 11233 | |
| 11234 | return From; |
| 11235 | } |
| 11236 | |
// If A and B each contain a single contiguous run of set bits, does A | B
// form one contiguous run, i.e. does B sit immediately below A ("A . B")?
//
// Neither A nor B may be zero.
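// For example, A == 0b1100 and B == 0b0011 concatenate properly (B sits
// immediately below A with no gap or overlap), while A == 0b1100 and
// B == 0b0001 leave a gap.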
| 11240 | static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { |
| 11241 | unsigned LastActiveBitInA = A.countTrailingZeros(); |
| 11242 | unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1; |
| 11243 | return LastActiveBitInA - 1 == FirstActiveBitInB; |
| 11244 | } |
| 11245 | |
| 11246 | static SDValue FindBFIToCombineWith(SDNode *N) { |
  // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it
  // can combine with, if one exists.
| 11249 | APInt ToMask, FromMask; |
| 11250 | SDValue From = ParseBFI(N, ToMask, FromMask); |
| 11251 | SDValue To = N->getOperand(0); |
| 11252 | |
| 11253 | // Now check for a compatible BFI to merge with. We can pass through BFIs that |
| 11254 | // aren't compatible, but not if they set the same bit in their destination as |
| 11255 | // we do (or that of any BFI we're going to combine with). |
| 11256 | SDValue V = To; |
| 11257 | APInt CombinedToMask = ToMask; |
| 11258 | while (V.getOpcode() == ARMISD::BFI) { |
| 11259 | APInt NewToMask, NewFromMask; |
| 11260 | SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask); |
| 11261 | if (NewFrom != From) { |
| 11262 | // This BFI has a different base. Keep going. |
| 11263 | CombinedToMask |= NewToMask; |
| 11264 | V = V.getOperand(0); |
| 11265 | continue; |
| 11266 | } |
| 11267 | |
| 11268 | // Do the written bits conflict with any we've seen so far? |
| 11269 | if ((NewToMask & CombinedToMask).getBoolValue()) |
| 11270 | // Conflicting bits - bail out because going further is unsafe. |
| 11271 | return SDValue(); |
| 11272 | |
| 11273 | // Are the new bits contiguous when combined with the old bits? |
| 11274 | if (BitsProperlyConcatenate(ToMask, NewToMask) && |
| 11275 | BitsProperlyConcatenate(FromMask, NewFromMask)) |
| 11276 | return V; |
| 11277 | if (BitsProperlyConcatenate(NewToMask, ToMask) && |
| 11278 | BitsProperlyConcatenate(NewFromMask, FromMask)) |
| 11279 | return V; |
| 11280 | |
| 11281 | // We've seen a write to some bits, so track it. |
| 11282 | CombinedToMask |= NewToMask; |
| 11283 | // Keep going... |
| 11284 | V = V.getOperand(0); |
| 11285 | } |
| 11286 | |
| 11287 | return SDValue(); |
| 11288 | } |
| 11289 | |
| 11290 | static SDValue PerformBFICombine(SDNode *N, |
| 11291 | TargetLowering::DAGCombinerInfo &DCI) { |
| 11292 | SDValue N1 = N->getOperand(1); |
| 11293 | if (N1.getOpcode() == ISD::AND) { |
| 11294 | // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff |
| 11295 | // the bits being cleared by the AND are not demanded by the BFI. |
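// For example (illustrative): if the BFI inserts only bits [7:0] of its
// second operand, an (and B, 0xff) feeding that operand clears nothing
// the BFI demands, so the AND can be dropped.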
| 11296 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); |
| 11297 | if (!N11C) |
| 11298 | return SDValue(); |
| 11299 | unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); |
| 11300 | unsigned LSB = countTrailingZeros(~InvMask); |
| 11301 | unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB; |
| 11302 | assert(Width < |
| 11303 | static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && |
| 11304 | "undefined behavior" ); |
| 11305 | unsigned Mask = (1u << Width) - 1; |
| 11306 | unsigned Mask2 = N11C->getZExtValue(); |
| 11307 | if ((Mask & (~Mask2)) == 0) |
| 11308 | return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0), |
| 11309 | N->getOperand(0), N1.getOperand(0), |
| 11310 | N->getOperand(2)); |
| 11311 | } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) { |
| 11312 | // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes. |
| 11313 | // Keep track of any consecutive bits set that all come from the same base |
| 11314 | // value. We can combine these together into a single BFI. |
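// For example (illustrative):
//   (bfi (bfi A, (srl B, #8), <bits 15:8>), B, <bits 7:0>)
// writes bits [15:0] of B into A, and the two nodes can be merged into a
// single BFI covering the combined 16-bit field.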
| 11315 | SDValue CombineBFI = FindBFIToCombineWith(N); |
| 11316 | if (CombineBFI == SDValue()) |
| 11317 | return SDValue(); |
| 11318 | |
| 11319 | // We've found a BFI. |
| 11320 | APInt ToMask1, FromMask1; |
| 11321 | SDValue From1 = ParseBFI(N, ToMask1, FromMask1); |
| 11322 | |
| 11323 | APInt ToMask2, FromMask2; |
| 11324 | SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2); |
| 11325 | assert(From1 == From2); |
| 11326 | (void)From2; |
| 11327 | |
| 11328 | // First, unlink CombineBFI. |
| 11329 | DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0)); |
| 11330 | // Then create a new BFI, combining the two together. |
| 11331 | APInt NewFromMask = FromMask1 | FromMask2; |
| 11332 | APInt NewToMask = ToMask1 | ToMask2; |
| 11333 | |
| 11334 | EVT VT = N->getValueType(0); |
| 11335 | SDLoc dl(N); |
| 11336 | |
| 11337 | if (NewFromMask[0] == 0) |
| 11338 | From1 = DCI.DAG.getNode( |
| 11339 | ISD::SRL, dl, VT, From1, |
| 11340 | DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT)); |
| 11341 | return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1, |
| 11342 | DCI.DAG.getConstant(~NewToMask, dl, VT)); |
| 11343 | } |
| 11344 | return SDValue(); |
| 11345 | } |
| 11346 | |
| 11347 | /// PerformVMOVRRDCombine - Target-specific dag combine xforms for |
| 11348 | /// ARMISD::VMOVRRD. |
| 11349 | static SDValue PerformVMOVRRDCombine(SDNode *N, |
| 11350 | TargetLowering::DAGCombinerInfo &DCI, |
| 11351 | const ARMSubtarget *Subtarget) { |
| 11352 | // vmovrrd(vmovdrr x, y) -> x,y |
| 11353 | SDValue InDouble = N->getOperand(0); |
| 11354 | if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64()) |
| 11355 | return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); |
| 11356 | |
| 11357 | // vmovrrd(load f64) -> (load i32), (load i32) |
| 11358 | SDNode *InNode = InDouble.getNode(); |
| 11359 | if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && |
| 11360 | InNode->getValueType(0) == MVT::f64 && |
| 11361 | InNode->getOperand(1).getOpcode() == ISD::FrameIndex && |
| 11362 | !cast<LoadSDNode>(InNode)->isVolatile()) { |
| 11363 | // TODO: Should this be done for non-FrameIndex operands? |
| 11364 | LoadSDNode *LD = cast<LoadSDNode>(InNode); |
| 11365 | |
| 11366 | SelectionDAG &DAG = DCI.DAG; |
| 11367 | SDLoc DL(LD); |
| 11368 | SDValue BasePtr = LD->getBasePtr(); |
| 11369 | SDValue NewLD1 = |
| 11370 | DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(), |
| 11371 | LD->getAlignment(), LD->getMemOperand()->getFlags()); |
| 11372 | |
| 11373 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, |
| 11374 | DAG.getConstant(4, DL, MVT::i32)); |
| 11375 | SDValue NewLD2 = DAG.getLoad( |
| 11376 | MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(), |
| 11377 | std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags()); |
| 11378 | |
| 11379 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); |
| 11380 | if (DCI.DAG.getDataLayout().isBigEndian()) |
std::swap(NewLD1, NewLD2);
| 11382 | SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); |
| 11383 | return Result; |
| 11384 | } |
| 11385 | |
| 11386 | return SDValue(); |
| 11387 | } |
| 11388 | |
| 11389 | /// PerformVMOVDRRCombine - Target-specific dag combine xforms for |
| 11390 | /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. |
| 11391 | static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { |
| 11392 | // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) |
| 11393 | SDValue Op0 = N->getOperand(0); |
| 11394 | SDValue Op1 = N->getOperand(1); |
| 11395 | if (Op0.getOpcode() == ISD::BITCAST) |
| 11396 | Op0 = Op0.getOperand(0); |
| 11397 | if (Op1.getOpcode() == ISD::BITCAST) |
| 11398 | Op1 = Op1.getOperand(0); |
| 11399 | if (Op0.getOpcode() == ARMISD::VMOVRRD && |
| 11400 | Op0.getNode() == Op1.getNode() && |
| 11401 | Op0.getResNo() == 0 && Op1.getResNo() == 1) |
| 11402 | return DAG.getNode(ISD::BITCAST, SDLoc(N), |
| 11403 | N->getValueType(0), Op0.getOperand(0)); |
| 11404 | return SDValue(); |
| 11405 | } |
| 11406 | |
| 11407 | /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node |
| 11408 | /// are normal, non-volatile loads. If so, it is profitable to bitcast an |
| 11409 | /// i64 vector to have f64 elements, since the value can then be loaded |
| 11410 | /// directly into a VFP register. |
| 11411 | static bool hasNormalLoadOperand(SDNode *N) { |
| 11412 | unsigned NumElts = N->getValueType(0).getVectorNumElements(); |
| 11413 | for (unsigned i = 0; i < NumElts; ++i) { |
| 11414 | SDNode *Elt = N->getOperand(i).getNode(); |
| 11415 | if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) |
| 11416 | return true; |
| 11417 | } |
| 11418 | return false; |
| 11419 | } |
| 11420 | |
| 11421 | /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for |
| 11422 | /// ISD::BUILD_VECTOR. |
| 11423 | static SDValue PerformBUILD_VECTORCombine(SDNode *N, |
| 11424 | TargetLowering::DAGCombinerInfo &DCI, |
| 11425 | const ARMSubtarget *Subtarget) { |
| 11426 | // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): |
| 11427 | // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value |
| 11428 | // into a pair of GPRs, which is fine when the value is used as a scalar, |
| 11429 | // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. |
| 11430 | SelectionDAG &DAG = DCI.DAG; |
| 11431 | if (N->getNumOperands() == 2) |
| 11432 | if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) |
| 11433 | return RV; |
| 11434 | |
| 11435 | // Load i64 elements as f64 values so that type legalization does not split |
| 11436 | // them up into i32 values. |
| 11437 | EVT VT = N->getValueType(0); |
| 11438 | if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) |
| 11439 | return SDValue(); |
| 11440 | SDLoc dl(N); |
| 11441 | SmallVector<SDValue, 8> Ops; |
| 11442 | unsigned NumElts = VT.getVectorNumElements(); |
| 11443 | for (unsigned i = 0; i < NumElts; ++i) { |
| 11444 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); |
| 11445 | Ops.push_back(V); |
| 11446 | // Make the DAGCombiner fold the bitcast. |
| 11447 | DCI.AddToWorklist(V.getNode()); |
| 11448 | } |
| 11449 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); |
| 11450 | SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); |
| 11451 | return DAG.getNode(ISD::BITCAST, dl, VT, BV); |
| 11452 | } |
| 11453 | |
| 11454 | /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. |
| 11455 | static SDValue |
| 11456 | PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 11457 | // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. |
| 11458 | // At that time, we may have inserted bitcasts from integer to float. |
// If these bitcasts have survived DAGCombine, change the lowering of this
// BUILD_VECTOR into something more vector friendly, i.e., something that
// does not force the use of floating point types.
| 11462 | |
| 11463 | // Make sure we can change the type of the vector. |
| 11464 | // This is possible iff: |
// 1. The vector is only used in a bitcast to an integer type. I.e.,
| 11466 | // 1.1. Vector is used only once. |
| 11467 | // 1.2. Use is a bit convert to an integer type. |
// 2. The size of its operands is 32 bits (64-bit operands are not legal).
| 11469 | EVT VT = N->getValueType(0); |
| 11470 | EVT EltVT = VT.getVectorElementType(); |
| 11471 | |
| 11472 | // Check 1.1. and 2. |
| 11473 | if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) |
| 11474 | return SDValue(); |
| 11475 | |
| 11476 | // By construction, the input type must be float. |
assert(EltVT == MVT::f32 && "Unexpected type!");
| 11478 | |
| 11479 | // Check 1.2. |
| 11480 | SDNode *Use = *N->use_begin(); |
| 11481 | if (Use->getOpcode() != ISD::BITCAST || |
| 11482 | Use->getValueType(0).isFloatingPoint()) |
| 11483 | return SDValue(); |
| 11484 | |
| 11485 | // Check profitability. |
// The model: if more than half of the relevant operands are bitcast from
// i32, turn the build_vector into a sequence of insert_vector_elt.
| 11488 | // Relevant operands are everything that is not statically |
| 11489 | // (i.e., at compile time) bitcasted. |
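// For example (illustrative): in a 4-element build_vector with two
// (bitcast i32 to f32) operands, one constant, and one other float value,
// 2 of the 3 relevant operands are bitcast, so the rewrite pays off.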
| 11490 | unsigned NumOfBitCastedElts = 0; |
| 11491 | unsigned NumElts = VT.getVectorNumElements(); |
| 11492 | unsigned NumOfRelevantElts = NumElts; |
| 11493 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) { |
| 11494 | SDValue Elt = N->getOperand(Idx); |
| 11495 | if (Elt->getOpcode() == ISD::BITCAST) { |
| 11496 | // Assume only bit cast to i32 will go away. |
| 11497 | if (Elt->getOperand(0).getValueType() == MVT::i32) |
| 11498 | ++NumOfBitCastedElts; |
| 11499 | } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) |
| 11500 | // Constants are statically casted, thus do not count them as |
| 11501 | // relevant operands. |
| 11502 | --NumOfRelevantElts; |
| 11503 | } |
| 11504 | |
| 11505 | // Check if more than half of the elements require a non-free bitcast. |
| 11506 | if (NumOfBitCastedElts <= NumOfRelevantElts / 2) |
| 11507 | return SDValue(); |
| 11508 | |
| 11509 | SelectionDAG &DAG = DCI.DAG; |
| 11510 | // Create the new vector type. |
| 11511 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); |
| 11512 | // Check if the type is legal. |
| 11513 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 11514 | if (!TLI.isTypeLegal(VecVT)) |
| 11515 | return SDValue(); |
| 11516 | |
| 11517 | // Combine: |
| 11518 | // ARMISD::BUILD_VECTOR E1, E2, ..., EN. |
| 11519 | // => BITCAST INSERT_VECTOR_ELT |
| 11520 | // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), |
| 11521 | // (BITCAST EN), N. |
| 11522 | SDValue Vec = DAG.getUNDEF(VecVT); |
| 11523 | SDLoc dl(N); |
for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
| 11525 | SDValue V = N->getOperand(Idx); |
| 11526 | if (V.isUndef()) |
| 11527 | continue; |
| 11528 | if (V.getOpcode() == ISD::BITCAST && |
| 11529 | V->getOperand(0).getValueType() == MVT::i32) |
| 11530 | // Fold obvious case. |
| 11531 | V = V.getOperand(0); |
| 11532 | else { |
| 11533 | V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); |
| 11534 | // Make the DAGCombiner fold the bitcasts. |
| 11535 | DCI.AddToWorklist(V.getNode()); |
| 11536 | } |
| 11537 | SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32); |
| 11538 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); |
| 11539 | } |
| 11540 | Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); |
| 11541 | // Make the DAGCombiner fold the bitcasts. |
| 11542 | DCI.AddToWorklist(Vec.getNode()); |
| 11543 | return Vec; |
| 11544 | } |
| 11545 | |
| 11546 | /// PerformInsertEltCombine - Target-specific dag combine xforms for |
| 11547 | /// ISD::INSERT_VECTOR_ELT. |
| 11548 | static SDValue PerformInsertEltCombine(SDNode *N, |
| 11549 | TargetLowering::DAGCombinerInfo &DCI) { |
| 11550 | // Bitcast an i64 load inserted into a vector to f64. |
| 11551 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
| 11552 | EVT VT = N->getValueType(0); |
| 11553 | SDNode *Elt = N->getOperand(1).getNode(); |
| 11554 | if (VT.getVectorElementType() != MVT::i64 || |
| 11555 | !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) |
| 11556 | return SDValue(); |
| 11557 | |
| 11558 | SelectionDAG &DAG = DCI.DAG; |
| 11559 | SDLoc dl(N); |
| 11560 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, |
| 11561 | VT.getVectorNumElements()); |
| 11562 | SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); |
| 11563 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); |
| 11564 | // Make the DAGCombiner fold the bitcasts. |
| 11565 | DCI.AddToWorklist(Vec.getNode()); |
| 11566 | DCI.AddToWorklist(V.getNode()); |
| 11567 | SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, |
| 11568 | Vec, V, N->getOperand(2)); |
| 11569 | return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); |
| 11570 | } |
| 11571 | |
| 11572 | /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for |
| 11573 | /// ISD::VECTOR_SHUFFLE. |
| 11574 | static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { |
| 11575 | // The LLVM shufflevector instruction does not require the shuffle mask |
| 11576 | // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does |
| 11577 | // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the |
| 11578 | // operands do not match the mask length, they are extended by concatenating |
| 11579 | // them with undef vectors. That is probably the right thing for other |
| 11580 | // targets, but for NEON it is better to concatenate two double-register |
| 11581 | // size vector operands into a single quad-register size vector. Do that |
| 11582 | // transformation here: |
| 11583 | // shuffle(concat(v1, undef), concat(v2, undef)) -> |
| 11584 | // shuffle(concat(v1, v2), undef) |
| 11585 | SDValue Op0 = N->getOperand(0); |
| 11586 | SDValue Op1 = N->getOperand(1); |
| 11587 | if (Op0.getOpcode() != ISD::CONCAT_VECTORS || |
| 11588 | Op1.getOpcode() != ISD::CONCAT_VECTORS || |
| 11589 | Op0.getNumOperands() != 2 || |
| 11590 | Op1.getNumOperands() != 2) |
| 11591 | return SDValue(); |
| 11592 | SDValue Concat0Op1 = Op0.getOperand(1); |
| 11593 | SDValue Concat1Op1 = Op1.getOperand(1); |
| 11594 | if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) |
| 11595 | return SDValue(); |
| 11596 | // Skip the transformation if any of the types are illegal. |
| 11597 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 11598 | EVT VT = N->getValueType(0); |
| 11599 | if (!TLI.isTypeLegal(VT) || |
| 11600 | !TLI.isTypeLegal(Concat0Op1.getValueType()) || |
| 11601 | !TLI.isTypeLegal(Concat1Op1.getValueType())) |
| 11602 | return SDValue(); |
| 11603 | |
| 11604 | SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, |
| 11605 | Op0.getOperand(0), Op1.getOperand(0)); |
| 11606 | // Translate the shuffle mask. |
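// For example (illustrative), with NumElts = 4: mask elements 0-1 select
// from v1 and are kept, elements 4-5 select from v2 and are remapped to
// 2-3, and anything referring to an undef half becomes -1.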
| 11607 | SmallVector<int, 16> NewMask; |
| 11608 | unsigned NumElts = VT.getVectorNumElements(); |
| 11609 | unsigned HalfElts = NumElts/2; |
| 11610 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); |
| 11611 | for (unsigned n = 0; n < NumElts; ++n) { |
| 11612 | int MaskElt = SVN->getMaskElt(n); |
| 11613 | int NewElt = -1; |
| 11614 | if (MaskElt < (int)HalfElts) |
| 11615 | NewElt = MaskElt; |
| 11616 | else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) |
| 11617 | NewElt = HalfElts + MaskElt - NumElts; |
| 11618 | NewMask.push_back(NewElt); |
| 11619 | } |
| 11620 | return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, |
| 11621 | DAG.getUNDEF(VT), NewMask); |
| 11622 | } |
| 11623 | |
| 11624 | /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, |
| 11625 | /// NEON load/store intrinsics, and generic vector load/stores, to merge |
| 11626 | /// base address updates. |
| 11627 | /// For generic load/stores, the memory type is assumed to be a vector. |
| 11628 | /// The caller is assumed to have checked legality. |
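/// For example (illustrative):
///   %ld = vld1(%addr);  %addr.next = add %addr, #16
/// becomes a single post-incremented load:
///   %ld, %addr.next = vld1_upd(%addr, #16)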
| 11629 | static SDValue CombineBaseUpdate(SDNode *N, |
| 11630 | TargetLowering::DAGCombinerInfo &DCI) { |
| 11631 | SelectionDAG &DAG = DCI.DAG; |
| 11632 | const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || |
| 11633 | N->getOpcode() == ISD::INTRINSIC_W_CHAIN); |
| 11634 | const bool isStore = N->getOpcode() == ISD::STORE; |
| 11635 | const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); |
| 11636 | SDValue Addr = N->getOperand(AddrOpIdx); |
| 11637 | MemSDNode *MemN = cast<MemSDNode>(N); |
| 11638 | SDLoc dl(N); |
| 11639 | |
| 11640 | // Search for a use of the address operand that is an increment. |
| 11641 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), |
| 11642 | UE = Addr.getNode()->use_end(); UI != UE; ++UI) { |
| 11643 | SDNode *User = *UI; |
| 11644 | if (User->getOpcode() != ISD::ADD || |
| 11645 | UI.getUse().getResNo() != Addr.getResNo()) |
| 11646 | continue; |
| 11647 | |
| 11648 | // Check that the add is independent of the load/store. Otherwise, folding |
| 11649 | // it would create a cycle. We can avoid searching through Addr as it's a |
| 11650 | // predecessor to both. |
| 11651 | SmallPtrSet<const SDNode *, 32> Visited; |
| 11652 | SmallVector<const SDNode *, 16> Worklist; |
| 11653 | Visited.insert(Addr.getNode()); |
| 11654 | Worklist.push_back(N); |
| 11655 | Worklist.push_back(User); |
| 11656 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || |
| 11657 | SDNode::hasPredecessorHelper(User, Visited, Worklist)) |
| 11658 | continue; |
| 11659 | |
| 11660 | // Find the new opcode for the updating load/store. |
| 11661 | bool isLoadOp = true; |
| 11662 | bool isLaneOp = false; |
| 11663 | unsigned NewOpc = 0; |
| 11664 | unsigned NumVecs = 0; |
| 11665 | if (isIntrinsic) { |
| 11666 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); |
| 11667 | switch (IntNo) { |
| 11668 | default: llvm_unreachable("unexpected intrinsic for Neon base update" ); |
| 11669 | case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; |
| 11670 | NumVecs = 1; break; |
| 11671 | case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; |
| 11672 | NumVecs = 2; break; |
| 11673 | case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; |
| 11674 | NumVecs = 3; break; |
| 11675 | case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; |
| 11676 | NumVecs = 4; break; |
| 11677 | case Intrinsic::arm_neon_vld2dup: |
| 11678 | case Intrinsic::arm_neon_vld3dup: |
| 11679 | case Intrinsic::arm_neon_vld4dup: |
| 11680 | // TODO: Support updating VLDxDUP nodes. For now, we just skip |
| 11681 | // combining base updates for such intrinsics. |
| 11682 | continue; |
| 11683 | case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; |
| 11684 | NumVecs = 2; isLaneOp = true; break; |
| 11685 | case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; |
| 11686 | NumVecs = 3; isLaneOp = true; break; |
| 11687 | case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; |
| 11688 | NumVecs = 4; isLaneOp = true; break; |
| 11689 | case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; |
| 11690 | NumVecs = 1; isLoadOp = false; break; |
| 11691 | case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; |
| 11692 | NumVecs = 2; isLoadOp = false; break; |
| 11693 | case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; |
| 11694 | NumVecs = 3; isLoadOp = false; break; |
| 11695 | case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; |
| 11696 | NumVecs = 4; isLoadOp = false; break; |
| 11697 | case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; |
| 11698 | NumVecs = 2; isLoadOp = false; isLaneOp = true; break; |
| 11699 | case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; |
| 11700 | NumVecs = 3; isLoadOp = false; isLaneOp = true; break; |
| 11701 | case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; |
| 11702 | NumVecs = 4; isLoadOp = false; isLaneOp = true; break; |
| 11703 | } |
| 11704 | } else { |
| 11705 | isLaneOp = true; |
| 11706 | switch (N->getOpcode()) { |
| 11707 | default: llvm_unreachable("unexpected opcode for Neon base update" ); |
| 11708 | case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break; |
| 11709 | case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; |
| 11710 | case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; |
| 11711 | case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; |
| 11712 | case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD; |
| 11713 | NumVecs = 1; isLaneOp = false; break; |
| 11714 | case ISD::STORE: NewOpc = ARMISD::VST1_UPD; |
| 11715 | NumVecs = 1; isLaneOp = false; isLoadOp = false; break; |
| 11716 | } |
| 11717 | } |
| 11718 | |
| 11719 | // Find the size of memory referenced by the load/store. |
| 11720 | EVT VecTy; |
| 11721 | if (isLoadOp) { |
| 11722 | VecTy = N->getValueType(0); |
| 11723 | } else if (isIntrinsic) { |
| 11724 | VecTy = N->getOperand(AddrOpIdx+1).getValueType(); |
| 11725 | } else { |
assert(isStore && "Node has to be a load, a store, or an intrinsic!");
| 11727 | VecTy = N->getOperand(1).getValueType(); |
| 11728 | } |
| 11729 | |
| 11730 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
| 11731 | if (isLaneOp) |
| 11732 | NumBytes /= VecTy.getVectorNumElements(); |
| 11733 | |
| 11734 | // If the increment is a constant, it must match the memory ref size. |
| 11735 | SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); |
| 11736 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode()); |
| 11737 | if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) { |
| 11738 | // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two |
| 11739 | // separate instructions that make it harder to use a non-constant update. |
| 11740 | continue; |
| 11741 | } |
| 11742 | |
| 11743 | // OK, we found an ADD we can fold into the base update. |
| 11744 | // Now, create a _UPD node, taking care of not breaking alignment. |
| 11745 | |
| 11746 | EVT AlignedVecTy = VecTy; |
| 11747 | unsigned Alignment = MemN->getAlignment(); |
| 11748 | |
| 11749 | // If this is a less-than-standard-aligned load/store, change the type to |
| 11750 | // match the standard alignment. |
// The alignment is overlooked when selecting _UPD variants, and it's
// easier to introduce bitcasts here than to fix that.
| 11753 | // There are 3 ways to get to this base-update combine: |
| 11754 | // - intrinsics: they are assumed to be properly aligned (to the standard |
| 11755 | // alignment of the memory type), so we don't need to do anything. |
| 11756 | // - ARMISD::VLDx nodes: they are only generated from the aforementioned |
| 11757 | // intrinsics, so, likewise, there's nothing to do. |
| 11758 | // - generic load/store instructions: the alignment is specified as an |
| 11759 | // explicit operand, rather than implicitly as the standard alignment |
// of the memory type (like the intrinsics). We need to change the
| 11761 | // memory type to match the explicit alignment. That way, we don't |
| 11762 | // generate non-standard-aligned ARMISD::VLDx nodes. |
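// For example (illustrative): a v2i64 load whose MMO alignment is only 2
// is rewritten here as a v8i16 load, whose standard element alignment
// matches the explicit one.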
| 11763 | if (isa<LSBaseSDNode>(N)) { |
| 11764 | if (Alignment == 0) |
| 11765 | Alignment = 1; |
| 11766 | if (Alignment < VecTy.getScalarSizeInBits() / 8) { |
| 11767 | MVT EltTy = MVT::getIntegerVT(Alignment * 8); |
assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
assert(!isLaneOp && "Unexpected generic load/store lane.");
| 11770 | unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); |
| 11771 | AlignedVecTy = MVT::getVectorVT(EltTy, NumElts); |
| 11772 | } |
| 11773 | // Don't set an explicit alignment on regular load/stores that we want |
| 11774 | // to transform to VLD/VST 1_UPD nodes. |
| 11775 | // This matches the behavior of regular load/stores, which only get an |
| 11776 | // explicit alignment if the MMO alignment is larger than the standard |
| 11777 | // alignment of the memory type. |
| 11778 | // Intrinsics, however, always get an explicit alignment, set to the |
| 11779 | // alignment of the MMO. |
| 11780 | Alignment = 1; |
| 11781 | } |
| 11782 | |
| 11783 | // Create the new updating load/store node. |
| 11784 | // First, create an SDVTList for the new updating node's results. |
| 11785 | EVT Tys[6]; |
| 11786 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
| 11787 | unsigned n; |
| 11788 | for (n = 0; n < NumResultVecs; ++n) |
| 11789 | Tys[n] = AlignedVecTy; |
| 11790 | Tys[n++] = MVT::i32; |
| 11791 | Tys[n] = MVT::Other; |
| 11792 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2)); |
| 11793 | |
| 11794 | // Then, gather the new node's operands. |
| 11795 | SmallVector<SDValue, 8> Ops; |
| 11796 | Ops.push_back(N->getOperand(0)); // incoming chain |
| 11797 | Ops.push_back(N->getOperand(AddrOpIdx)); |
| 11798 | Ops.push_back(Inc); |
| 11799 | |
| 11800 | if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) { |
| 11801 | // Try to match the intrinsic's signature |
| 11802 | Ops.push_back(StN->getValue()); |
| 11803 | } else { |
| 11804 | // Loads (and of course intrinsics) match the intrinsics' signature, |
| 11805 | // so just add all but the alignment operand. |
| 11806 | for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i) |
| 11807 | Ops.push_back(N->getOperand(i)); |
| 11808 | } |
| 11809 | |
| 11810 | // For all node types, the alignment operand is always the last one. |
| 11811 | Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32)); |
| 11812 | |
| 11813 | // If this is a non-standard-aligned STORE, the penultimate operand is the |
| 11814 | // stored value. Bitcast it to the aligned type. |
| 11815 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { |
| 11816 | SDValue &StVal = Ops[Ops.size()-2]; |
| 11817 | StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal); |
| 11818 | } |
| 11819 | |
| 11820 | EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; |
| 11821 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT, |
| 11822 | MemN->getMemOperand()); |
| 11823 | |
| 11824 | // Update the uses. |
| 11825 | SmallVector<SDValue, 5> NewResults; |
| 11826 | for (unsigned i = 0; i < NumResultVecs; ++i) |
| 11827 | NewResults.push_back(SDValue(UpdN.getNode(), i)); |
| 11828 | |
// If this is a non-standard-aligned LOAD, the first result is the loaded
| 11830 | // value. Bitcast it to the expected result type. |
| 11831 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { |
| 11832 | SDValue &LdVal = NewResults[0]; |
| 11833 | LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal); |
| 11834 | } |
| 11835 | |
| 11836 | NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain |
| 11837 | DCI.CombineTo(N, NewResults); |
| 11838 | DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); |
| 11839 | |
| 11840 | break; |
| 11841 | } |
| 11842 | return SDValue(); |
| 11843 | } |
| 11844 | |
| 11845 | static SDValue PerformVLDCombine(SDNode *N, |
| 11846 | TargetLowering::DAGCombinerInfo &DCI) { |
| 11847 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 11848 | return SDValue(); |
| 11849 | |
| 11850 | return CombineBaseUpdate(N, DCI); |
| 11851 | } |
| 11852 | |
| 11853 | /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a |
| 11854 | /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic |
| 11855 | /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and |
| 11856 | /// return true. |
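/// For example (illustrative): if every result of a vld2lane is used only
/// by a VDUPLANE that broadcasts the very lane the load wrote, the group
/// is replaced by a single vld2dup that loads and replicates the element.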
| 11857 | static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 11858 | SelectionDAG &DAG = DCI.DAG; |
| 11859 | EVT VT = N->getValueType(0); |
| 11860 | // vldN-dup instructions only support 64-bit vectors for N > 1. |
| 11861 | if (!VT.is64BitVector()) |
| 11862 | return false; |
| 11863 | |
| 11864 | // Check if the VDUPLANE operand is a vldN-dup intrinsic. |
| 11865 | SDNode *VLD = N->getOperand(0).getNode(); |
| 11866 | if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) |
| 11867 | return false; |
| 11868 | unsigned NumVecs = 0; |
| 11869 | unsigned NewOpc = 0; |
| 11870 | unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); |
| 11871 | if (IntNo == Intrinsic::arm_neon_vld2lane) { |
| 11872 | NumVecs = 2; |
| 11873 | NewOpc = ARMISD::VLD2DUP; |
| 11874 | } else if (IntNo == Intrinsic::arm_neon_vld3lane) { |
| 11875 | NumVecs = 3; |
| 11876 | NewOpc = ARMISD::VLD3DUP; |
| 11877 | } else if (IntNo == Intrinsic::arm_neon_vld4lane) { |
| 11878 | NumVecs = 4; |
| 11879 | NewOpc = ARMISD::VLD4DUP; |
| 11880 | } else { |
| 11881 | return false; |
| 11882 | } |
| 11883 | |
| 11884 | // First check that all the vldN-lane uses are VDUPLANEs and that the lane |
| 11885 | // numbers match the load. |
| 11886 | unsigned VLDLaneNo = |
| 11887 | cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); |
| 11888 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); |
| 11889 | UI != UE; ++UI) { |
| 11890 | // Ignore uses of the chain result. |
| 11891 | if (UI.getUse().getResNo() == NumVecs) |
| 11892 | continue; |
| 11893 | SDNode *User = *UI; |
| 11894 | if (User->getOpcode() != ARMISD::VDUPLANE || |
| 11895 | VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) |
| 11896 | return false; |
| 11897 | } |
| 11898 | |
| 11899 | // Create the vldN-dup node. |
| 11900 | EVT Tys[5]; |
| 11901 | unsigned n; |
| 11902 | for (n = 0; n < NumVecs; ++n) |
| 11903 | Tys[n] = VT; |
| 11904 | Tys[n] = MVT::Other; |
| 11905 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1)); |
| 11906 | SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; |
| 11907 | MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); |
| 11908 | SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, |
| 11909 | Ops, VLDMemInt->getMemoryVT(), |
| 11910 | VLDMemInt->getMemOperand()); |
| 11911 | |
| 11912 | // Update the uses. |
| 11913 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); |
| 11914 | UI != UE; ++UI) { |
| 11915 | unsigned ResNo = UI.getUse().getResNo(); |
| 11916 | // Ignore uses of the chain result. |
| 11917 | if (ResNo == NumVecs) |
| 11918 | continue; |
| 11919 | SDNode *User = *UI; |
| 11920 | DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); |
| 11921 | } |
| 11922 | |
| 11923 | // Now the vldN-lane intrinsic is dead except for its chain result. |
| 11924 | // Update uses of the chain. |
| 11925 | std::vector<SDValue> VLDDupResults; |
| 11926 | for (unsigned n = 0; n < NumVecs; ++n) |
| 11927 | VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); |
| 11928 | VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); |
| 11929 | DCI.CombineTo(VLD, VLDDupResults); |
| 11930 | |
| 11931 | return true; |
| 11932 | } |
| 11933 | |
| 11934 | /// PerformVDUPLANECombine - Target-specific dag combine xforms for |
| 11935 | /// ARMISD::VDUPLANE. |
| 11936 | static SDValue PerformVDUPLANECombine(SDNode *N, |
| 11937 | TargetLowering::DAGCombinerInfo &DCI) { |
| 11938 | SDValue Op = N->getOperand(0); |
| 11939 | |
| 11940 | // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses |
| 11941 | // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. |
| 11942 | if (CombineVLDDUP(N, DCI)) |
| 11943 | return SDValue(N, 0); |
| 11944 | |
| 11945 | // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is |
| 11946 | // redundant. Ignore bit_converts for now; element sizes are checked below. |
| 11947 | while (Op.getOpcode() == ISD::BITCAST) |
| 11948 | Op = Op.getOperand(0); |
| 11949 | if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) |
| 11950 | return SDValue(); |
| 11951 | |
| 11952 | // Make sure the VMOV element size is not bigger than the VDUPLANE elements. |
| 11953 | unsigned EltSize = Op.getScalarValueSizeInBits(); |
| 11954 | // The canonical VMOV for a zero vector uses a 32-bit element size. |
| 11955 | unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 11956 | unsigned EltBits; |
| 11957 | if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) |
| 11958 | EltSize = 8; |
| 11959 | EVT VT = N->getValueType(0); |
| 11960 | if (EltSize > VT.getScalarSizeInBits()) |
| 11961 | return SDValue(); |
| 11962 | |
| 11963 | return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); |
| 11964 | } |
| 11965 | |
| 11966 | /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. |
| 11967 | static SDValue PerformVDUPCombine(SDNode *N, |
| 11968 | TargetLowering::DAGCombinerInfo &DCI) { |
| 11969 | SelectionDAG &DAG = DCI.DAG; |
| 11970 | SDValue Op = N->getOperand(0); |
| 11971 | |
| 11972 | // Match VDUP(LOAD) -> VLD1DUP. |
| 11973 | // We match this pattern here rather than waiting for isel because the |
| 11974 | // transform is only legal for unindexed loads. |
| 11975 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()); |
| 11976 | if (LD && Op.hasOneUse() && LD->isUnindexed() && |
| 11977 | LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) { |
| 11978 | SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1), |
| 11979 | DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) }; |
| 11980 | SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other); |
| 11981 | SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys, |
| 11982 | Ops, LD->getMemoryVT(), |
| 11983 | LD->getMemOperand()); |
| 11984 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1)); |
| 11985 | return VLDDup; |
| 11986 | } |
| 11987 | |
| 11988 | return SDValue(); |
| 11989 | } |
| 11990 | |
| 11991 | static SDValue PerformLOADCombine(SDNode *N, |
| 11992 | TargetLowering::DAGCombinerInfo &DCI) { |
| 11993 | EVT VT = N->getValueType(0); |
| 11994 | |
| 11995 | // If this is a legal vector load, try to combine it into a VLD1_UPD. |
| 11996 | if (ISD::isNormalLoad(N) && VT.isVector() && |
| 11997 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 11998 | return CombineBaseUpdate(N, DCI); |
| 11999 | |
| 12000 | return SDValue(); |
| 12001 | } |
| 12002 | |
| 12003 | /// PerformSTORECombine - Target-specific dag combine xforms for |
| 12004 | /// ISD::STORE. |
| 12005 | static SDValue PerformSTORECombine(SDNode *N, |
| 12006 | TargetLowering::DAGCombinerInfo &DCI) { |
| 12007 | StoreSDNode *St = cast<StoreSDNode>(N); |
| 12008 | if (St->isVolatile()) |
| 12009 | return SDValue(); |
| 12010 | |
| 12011 | // Optimize trunc store (of multiple scalars) to shuffle and store. First, |
| 12012 | // pack all of the elements in one place. Next, store to memory in fewer |
| 12013 | // chunks. |
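// For example (illustrative): a truncating store of v4i32 to v4i8 becomes
// a v16i8 shuffle that packs the four live bytes into the low lanes,
// followed by a single i32 store of the packed data.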
| 12014 | SDValue StVal = St->getValue(); |
| 12015 | EVT VT = StVal.getValueType(); |
| 12016 | if (St->isTruncatingStore() && VT.isVector()) { |
| 12017 | SelectionDAG &DAG = DCI.DAG; |
| 12018 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12019 | EVT StVT = St->getMemoryVT(); |
| 12020 | unsigned NumElems = VT.getVectorNumElements(); |
assert(StVT != VT && "Cannot truncate to the same type");
| 12022 | unsigned FromEltSz = VT.getScalarSizeInBits(); |
| 12023 | unsigned ToEltSz = StVT.getScalarSizeInBits(); |
| 12024 | |
// The 'from' and 'to' element sizes and the element count must all be
// powers of two.
| 12026 | if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue(); |
| 12027 | |
| 12028 | // We are going to use the original vector elt for storing. |
| 12029 | // Accumulated smaller vector elements must be a multiple of the store size. |
| 12030 | if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue(); |
| 12031 | |
| 12032 | unsigned SizeRatio = FromEltSz / ToEltSz; |
| 12033 | assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); |
| 12034 | |
| 12035 | // Create a type on which we perform the shuffle. |
| 12036 | EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), |
| 12037 | NumElems*SizeRatio); |
| 12038 | assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 12039 | |
| 12040 | SDLoc DL(St); |
| 12041 | SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); |
| 12042 | SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); |
| 12043 | for (unsigned i = 0; i < NumElems; ++i) |
| 12044 | ShuffleVec[i] = DAG.getDataLayout().isBigEndian() |
| 12045 | ? (i + 1) * SizeRatio - 1 |
| 12046 | : i * SizeRatio; |
| 12047 | |
| 12048 | // Can't shuffle using an illegal type. |
| 12049 | if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); |
| 12050 | |
| 12051 | SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec, |
| 12052 | DAG.getUNDEF(WideVec.getValueType()), |
| 12053 | ShuffleVec); |
| 12054 | // At this point all of the data is stored at the bottom of the |
| 12055 | // register. We now need to save it to mem. |
| 12056 | |
| 12057 | // Find the largest store unit |
| 12058 | MVT StoreType = MVT::i8; |
| 12059 | for (MVT Tp : MVT::integer_valuetypes()) { |
| 12060 | if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) |
| 12061 | StoreType = Tp; |
| 12062 | } |
| 12063 | // Didn't find a legal store type. |
| 12064 | if (!TLI.isTypeLegal(StoreType)) |
| 12065 | return SDValue(); |
| 12066 | |
| 12067 | // Bitcast the original vector into a vector of store-size units |
| 12068 | EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), |
| 12069 | StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); |
| 12070 | assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 12071 | SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); |
| 12072 | SmallVector<SDValue, 8> Chains; |
| 12073 | SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL, |
| 12074 | TLI.getPointerTy(DAG.getDataLayout())); |
| 12075 | SDValue BasePtr = St->getBasePtr(); |
| 12076 | |
| 12077 | // Perform one or more big stores into memory. |
| 12078 | unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits(); |
| 12079 | for (unsigned I = 0; I < E; I++) { |
| 12080 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, |
| 12081 | StoreType, ShuffWide, |
| 12082 | DAG.getIntPtrConstant(I, DL)); |
| 12083 | SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr, |
| 12084 | St->getPointerInfo(), St->getAlignment(), |
| 12085 | St->getMemOperand()->getFlags()); |
| 12086 | BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, |
| 12087 | Increment); |
| 12088 | Chains.push_back(Ch); |
| 12089 | } |
| 12090 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
| 12091 | } |
| 12092 | |
| 12093 | if (!ISD::isNormalStore(St)) |
| 12094 | return SDValue(); |
| 12095 | |
| 12096 | // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and |
| 12097 | // ARM stores of arguments in the same cache line. |
| 12098 | if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && |
| 12099 | StVal.getNode()->hasOneUse()) { |
| 12100 | SelectionDAG &DAG = DCI.DAG; |
| 12101 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 12102 | SDLoc DL(St); |
| 12103 | SDValue BasePtr = St->getBasePtr(); |
| 12104 | SDValue NewST1 = DAG.getStore( |
| 12105 | St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0), |
| 12106 | BasePtr, St->getPointerInfo(), St->getAlignment(), |
| 12107 | St->getMemOperand()->getFlags()); |
| 12108 | |
| 12109 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, |
| 12110 | DAG.getConstant(4, DL, MVT::i32)); |
| 12111 | return DAG.getStore(NewST1.getValue(0), DL, |
| 12112 | StVal.getNode()->getOperand(isBigEndian ? 0 : 1), |
| 12113 | OffsetPtr, St->getPointerInfo(), |
| 12114 | std::min(4U, St->getAlignment() / 2), |
| 12115 | St->getMemOperand()->getFlags()); |
| 12116 | } |
| 12117 | |
| 12118 | if (StVal.getValueType() == MVT::i64 && |
| 12119 | StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 12120 | |
| 12121 | // Bitcast an i64 store extracted from a vector to f64. |
| 12122 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
| 12123 | SelectionDAG &DAG = DCI.DAG; |
| 12124 | SDLoc dl(StVal); |
| 12125 | SDValue IntVec = StVal.getOperand(0); |
| 12126 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, |
| 12127 | IntVec.getValueType().getVectorNumElements()); |
| 12128 | SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); |
| 12129 | SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, |
| 12130 | Vec, StVal.getOperand(1)); |
| 12131 | dl = SDLoc(N); |
| 12132 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); |
| 12133 | // Make the DAGCombiner fold the bitcasts. |
| 12134 | DCI.AddToWorklist(Vec.getNode()); |
| 12135 | DCI.AddToWorklist(ExtElt.getNode()); |
| 12136 | DCI.AddToWorklist(V.getNode()); |
| 12137 | return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), |
| 12138 | St->getPointerInfo(), St->getAlignment(), |
| 12139 | St->getMemOperand()->getFlags(), St->getAAInfo()); |
| 12140 | } |
| 12141 | |
| 12142 | // If this is a legal vector store, try to combine it into a VST1_UPD. |
| 12143 | if (ISD::isNormalStore(N) && VT.isVector() && |
| 12144 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 12145 | return CombineBaseUpdate(N, DCI); |
| 12146 | |
| 12147 | return SDValue(); |
| 12148 | } |
| 12149 | |
| 12150 | /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) |
| 12151 | /// can replace combinations of VMUL and VCVT (floating-point to integer) |
| 12152 | /// when the VMUL has a constant operand that is a power of 2. |
| 12153 | /// |
| 12154 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
| 12155 | /// vmul.f32 d16, d17, d16 |
| 12156 | /// vcvt.s32.f32 d16, d16 |
| 12157 | /// becomes: |
| 12158 | /// vcvt.s32.f32 d16, d16, #3 |
| 12159 | static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, |
| 12160 | const ARMSubtarget *Subtarget) { |
| 12161 | if (!Subtarget->hasNEON()) |
| 12162 | return SDValue(); |
| 12163 | |
| 12164 | SDValue Op = N->getOperand(0); |
| 12165 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || |
| 12166 | Op.getOpcode() != ISD::FMUL) |
| 12167 | return SDValue(); |
| 12168 | |
| 12169 | SDValue ConstVec = Op->getOperand(1); |
| 12170 | if (!isa<BuildVectorSDNode>(ConstVec)) |
| 12171 | return SDValue(); |
| 12172 | |
| 12173 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); |
| 12174 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 12175 | MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); |
| 12176 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 12177 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 12178 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 12179 | // These instructions only exist converting from f32 to i32. We can handle |
| 12180 | // smaller integers by generating an extra truncate, but larger ones would |
| 12181 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
// these instructions only support v2i32/v4i32 types.
| 12183 | return SDValue(); |
| 12184 | } |
| 12185 | |
| 12186 | BitVector UndefElements; |
| 12187 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); |
| 12188 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); |
| 12189 | if (C == -1 || C == 0 || C > 32) |
| 12190 | return SDValue(); |
| 12191 | |
| 12192 | SDLoc dl(N); |
| 12193 | bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; |
| 12194 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : |
| 12195 | Intrinsic::arm_neon_vcvtfp2fxu; |
| 12196 | SDValue FixConv = DAG.getNode( |
| 12197 | ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
| 12198 | DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0), |
| 12199 | DAG.getConstant(C, dl, MVT::i32)); |
| 12200 | |
| 12201 | if (IntBits < FloatBits) |
| 12202 | FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv); |
| 12203 | |
| 12204 | return FixConv; |
| 12205 | } |
| 12206 | |
| 12207 | /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) |
| 12208 | /// can replace combinations of VCVT (integer to floating-point) and VDIV |
| 12209 | /// when the VDIV has a constant operand that is a power of 2. |
| 12210 | /// |
| 12211 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
| 12212 | /// vcvt.f32.s32 d16, d16 |
| 12213 | /// vdiv.f32 d16, d17, d16 |
| 12214 | /// becomes: |
| 12215 | /// vcvt.f32.s32 d16, d16, #3 |
| 12216 | static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, |
| 12217 | const ARMSubtarget *Subtarget) { |
| 12218 | if (!Subtarget->hasNEON()) |
| 12219 | return SDValue(); |
| 12220 | |
| 12221 | SDValue Op = N->getOperand(0); |
| 12222 | unsigned OpOpcode = Op.getNode()->getOpcode(); |
| 12223 | if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() || |
| 12224 | (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) |
| 12225 | return SDValue(); |
| 12226 | |
| 12227 | SDValue ConstVec = N->getOperand(1); |
| 12228 | if (!isa<BuildVectorSDNode>(ConstVec)) |
| 12229 | return SDValue(); |
| 12230 | |
| 12231 | MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); |
| 12232 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 12233 | MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); |
| 12234 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 12235 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 12236 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 12237 | // These instructions only exist converting from i32 to f32. We can handle |
| 12238 | // smaller integers by generating an extra extend, but larger ones would |
| 12239 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
// these instructions only support v2i32/v4i32 types.
| 12241 | return SDValue(); |
| 12242 | } |
| 12243 | |
| 12244 | BitVector UndefElements; |
| 12245 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); |
| 12246 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); |
| 12247 | if (C == -1 || C == 0 || C > 32) |
| 12248 | return SDValue(); |
| 12249 | |
| 12250 | SDLoc dl(N); |
| 12251 | bool isSigned = OpOpcode == ISD::SINT_TO_FP; |
| 12252 | SDValue ConvInput = Op.getOperand(0); |
| 12253 | if (IntBits < FloatBits) |
| 12254 | ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, |
| 12255 | dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
| 12256 | ConvInput); |
| 12257 | |
| 12258 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : |
| 12259 | Intrinsic::arm_neon_vcvtfxu2fp; |
| 12260 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, |
| 12261 | Op.getValueType(), |
| 12262 | DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), |
| 12263 | ConvInput, DAG.getConstant(C, dl, MVT::i32)); |
| 12264 | } |
| 12265 | |
/// getVShiftImm - Check if this is a valid build_vector for the immediate
| 12267 | /// operand of a vector shift operation, where all the elements of the |
| 12268 | /// build_vector must have the same constant integer value. |
| 12269 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { |
| 12270 | // Ignore bit_converts. |
| 12271 | while (Op.getOpcode() == ISD::BITCAST) |
| 12272 | Op = Op.getOperand(0); |
| 12273 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); |
| 12274 | APInt SplatBits, SplatUndef; |
| 12275 | unsigned SplatBitSize; |
| 12276 | bool HasAnyUndefs; |
if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                  HasAnyUndefs, ElementBits) ||
| 12279 | SplatBitSize > ElementBits) |
| 12280 | return false; |
| 12281 | Cnt = SplatBits.getSExtValue(); |
| 12282 | return true; |
| 12283 | } |
| 12284 | |
| 12285 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate |
| 12286 | /// operand of a vector shift left operation. That value must be in the range: |
| 12287 | /// 0 <= Value < ElementBits for a left shift; or |
| 12288 | /// 0 <= Value <= ElementBits for a long left shift. |
| 12289 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { |
assert(VT.isVector() && "vector shift count is not a vector type");
| 12291 | int64_t ElementBits = VT.getScalarSizeInBits(); |
if (!getVShiftImm(Op, ElementBits, Cnt))
| 12293 | return false; |
| 12294 | return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); |
| 12295 | } |
| 12296 | |
| 12297 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate |
| 12298 | /// operand of a vector shift right operation. For a shift opcode, the value |
/// is positive, but for an intrinsic the shift count must be negative. The
| 12300 | /// absolute value must be in the range: |
| 12301 | /// 1 <= |Value| <= ElementBits for a right shift; or |
| 12302 | /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. |
| 12303 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, |
| 12304 | int64_t &Cnt) { |
assert(VT.isVector() && "vector shift count is not a vector type");
| 12306 | int64_t ElementBits = VT.getScalarSizeInBits(); |
if (!getVShiftImm(Op, ElementBits, Cnt))
| 12308 | return false; |
| 12309 | if (!isIntrinsic) |
| 12310 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); |
| 12311 | if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) { |
| 12312 | Cnt = -Cnt; |
| 12313 | return true; |
| 12314 | } |
| 12315 | return false; |
| 12316 | } |
| 12317 | |
| 12318 | /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. |
| 12319 | static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { |
| 12320 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); |
| 12321 | switch (IntNo) { |
| 12322 | default: |
| 12323 | // Don't do anything for most intrinsics. |
| 12324 | break; |
| 12325 | |
| 12326 | // Vector shifts: check for immediate versions and lower them. |
| 12327 | // Note: This is done during DAG combining instead of DAG legalizing because |
| 12328 | // the build_vectors for 64-bit vector element shift counts are generally |
| 12329 | // not legal, and it is hard to see their values after they get legalized to |
| 12330 | // loads from a constant pool. |
| 12331 | case Intrinsic::arm_neon_vshifts: |
| 12332 | case Intrinsic::arm_neon_vshiftu: |
| 12333 | case Intrinsic::arm_neon_vrshifts: |
| 12334 | case Intrinsic::arm_neon_vrshiftu: |
| 12335 | case Intrinsic::arm_neon_vrshiftn: |
| 12336 | case Intrinsic::arm_neon_vqshifts: |
| 12337 | case Intrinsic::arm_neon_vqshiftu: |
| 12338 | case Intrinsic::arm_neon_vqshiftsu: |
| 12339 | case Intrinsic::arm_neon_vqshiftns: |
| 12340 | case Intrinsic::arm_neon_vqshiftnu: |
| 12341 | case Intrinsic::arm_neon_vqshiftnsu: |
| 12342 | case Intrinsic::arm_neon_vqrshiftns: |
| 12343 | case Intrinsic::arm_neon_vqrshiftnu: |
| 12344 | case Intrinsic::arm_neon_vqrshiftnsu: { |
| 12345 | EVT VT = N->getOperand(1).getValueType(); |
| 12346 | int64_t Cnt; |
| 12347 | unsigned VShiftOpc = 0; |
| 12348 | |
| 12349 | switch (IntNo) { |
| 12350 | case Intrinsic::arm_neon_vshifts: |
| 12351 | case Intrinsic::arm_neon_vshiftu: |
| 12352 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { |
| 12353 | VShiftOpc = ARMISD::VSHL; |
| 12354 | break; |
| 12355 | } |
| 12356 | if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { |
| 12357 | VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? |
| 12358 | ARMISD::VSHRs : ARMISD::VSHRu); |
| 12359 | break; |
| 12360 | } |
| 12361 | return SDValue(); |
| 12362 | |
| 12363 | case Intrinsic::arm_neon_vrshifts: |
| 12364 | case Intrinsic::arm_neon_vrshiftu: |
| 12365 | if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) |
| 12366 | break; |
| 12367 | return SDValue(); |
| 12368 | |
| 12369 | case Intrinsic::arm_neon_vqshifts: |
| 12370 | case Intrinsic::arm_neon_vqshiftu: |
| 12371 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) |
| 12372 | break; |
| 12373 | return SDValue(); |
| 12374 | |
| 12375 | case Intrinsic::arm_neon_vqshiftsu: |
| 12376 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) |
| 12377 | break; |
| 12378 | llvm_unreachable("invalid shift count for vqshlu intrinsic" ); |
| 12379 | |
| 12380 | case Intrinsic::arm_neon_vrshiftn: |
| 12381 | case Intrinsic::arm_neon_vqshiftns: |
| 12382 | case Intrinsic::arm_neon_vqshiftnu: |
| 12383 | case Intrinsic::arm_neon_vqshiftnsu: |
| 12384 | case Intrinsic::arm_neon_vqrshiftns: |
| 12385 | case Intrinsic::arm_neon_vqrshiftnu: |
| 12386 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 12387 | // Narrowing shifts require an immediate right shift. |
| 12388 | if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) |
| 12389 | break; |
| 12390 | llvm_unreachable("invalid shift count for narrowing vector shift " |
| 12391 | "intrinsic" ); |
| 12392 | |
| 12393 | default: |
| 12394 | llvm_unreachable("unhandled vector shift" ); |
| 12395 | } |
| 12396 | |
| 12397 | switch (IntNo) { |
| 12398 | case Intrinsic::arm_neon_vshifts: |
| 12399 | case Intrinsic::arm_neon_vshiftu: |
| 12400 | // Opcode already set above. |
| 12401 | break; |
| 12402 | case Intrinsic::arm_neon_vrshifts: |
| 12403 | VShiftOpc = ARMISD::VRSHRs; break; |
| 12404 | case Intrinsic::arm_neon_vrshiftu: |
| 12405 | VShiftOpc = ARMISD::VRSHRu; break; |
| 12406 | case Intrinsic::arm_neon_vrshiftn: |
| 12407 | VShiftOpc = ARMISD::VRSHRN; break; |
| 12408 | case Intrinsic::arm_neon_vqshifts: |
| 12409 | VShiftOpc = ARMISD::VQSHLs; break; |
| 12410 | case Intrinsic::arm_neon_vqshiftu: |
| 12411 | VShiftOpc = ARMISD::VQSHLu; break; |
| 12412 | case Intrinsic::arm_neon_vqshiftsu: |
| 12413 | VShiftOpc = ARMISD::VQSHLsu; break; |
| 12414 | case Intrinsic::arm_neon_vqshiftns: |
| 12415 | VShiftOpc = ARMISD::VQSHRNs; break; |
| 12416 | case Intrinsic::arm_neon_vqshiftnu: |
| 12417 | VShiftOpc = ARMISD::VQSHRNu; break; |
| 12418 | case Intrinsic::arm_neon_vqshiftnsu: |
| 12419 | VShiftOpc = ARMISD::VQSHRNsu; break; |
| 12420 | case Intrinsic::arm_neon_vqrshiftns: |
| 12421 | VShiftOpc = ARMISD::VQRSHRNs; break; |
| 12422 | case Intrinsic::arm_neon_vqrshiftnu: |
| 12423 | VShiftOpc = ARMISD::VQRSHRNu; break; |
| 12424 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 12425 | VShiftOpc = ARMISD::VQRSHRNsu; break; |
| 12426 | } |
| 12427 | |
| 12428 | SDLoc dl(N); |
| 12429 | return DAG.getNode(VShiftOpc, dl, N->getValueType(0), |
| 12430 | N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32)); |
| 12431 | } |
| 12432 | |
| 12433 | case Intrinsic::arm_neon_vshiftins: { |
| 12434 | EVT VT = N->getOperand(1).getValueType(); |
| 12435 | int64_t Cnt; |
| 12436 | unsigned VShiftOpc = 0; |
| 12437 | |
| 12438 | if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) |
| 12439 | VShiftOpc = ARMISD::VSLI; |
| 12440 | else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) |
| 12441 | VShiftOpc = ARMISD::VSRI; |
else
  llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
| 12445 | |
| 12446 | SDLoc dl(N); |
| 12447 | return DAG.getNode(VShiftOpc, dl, N->getValueType(0), |
| 12448 | N->getOperand(1), N->getOperand(2), |
| 12449 | DAG.getConstant(Cnt, dl, MVT::i32)); |
| 12450 | } |
| 12451 | |
| 12452 | case Intrinsic::arm_neon_vqrshifts: |
| 12453 | case Intrinsic::arm_neon_vqrshiftu: |
| 12454 | // No immediate versions of these to check for. |
| 12455 | break; |
| 12456 | } |
| 12457 | |
| 12458 | return SDValue(); |
| 12459 | } |
| 12460 | |
| 12461 | /// PerformShiftCombine - Checks for immediate versions of vector shifts and |
| 12462 | /// lowers them. As with the vector shift intrinsics, this is done during DAG |
| 12463 | /// combining instead of DAG legalizing because the build_vectors for 64-bit |
| 12464 | /// vector element shift counts are generally not legal, and it is hard to see |
| 12465 | /// their values after they get legalized to loads from a constant pool. |
| 12466 | static SDValue PerformShiftCombine(SDNode *N, |
| 12467 | TargetLowering::DAGCombinerInfo &DCI, |
| 12468 | const ARMSubtarget *ST) { |
| 12469 | SelectionDAG &DAG = DCI.DAG; |
| 12470 | EVT VT = N->getValueType(0); |
| 12471 | if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { |
| 12472 | // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
| 12473 | // 16 bits of x are zero. This optimizes rev + lsr #16 into rev16.
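|       | // For example, when x is known to fit in 16 bits, roughly:
|       | //   rev r0, r0          ; bswap
|       | //   lsr r0, r0, #16
|       | // becomes
|       | //   rev16 r0, r0        ; byte-swap each halfword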
| 12474 | SDValue N1 = N->getOperand(1); |
| 12475 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { |
| 12476 | SDValue N0 = N->getOperand(0); |
| 12477 | if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && |
| 12478 | DAG.MaskedValueIsZero(N0.getOperand(0), |
| 12479 | APInt::getHighBitsSet(32, 16))) |
| 12480 | return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1); |
| 12481 | } |
| 12482 | } |
| 12483 | |
| 12484 | if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 && |
| 12485 | N->getOperand(0)->getOpcode() == ISD::AND && |
| 12486 | N->getOperand(0)->hasOneUse()) { |
| 12487 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 12488 | return SDValue(); |
| 12489 | // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't |
| 12490 | // usually show up because instcombine prefers to canonicalize it to |
| 12491 | // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come |
| 12492 | // out of GEP lowering in some cases. |
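|       | // For example (a sketch; the exact constants depend on the GEP):
|       | //   (shl (and x, 0x3ff), 2)
|       | // becomes
|       | //   (srl (shl x, 22), 20)
|       | // avoiding materializing the 0x3ff mask in a register on Thumb1.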
| 12493 | SDValue N0 = N->getOperand(0); |
| 12494 | ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 12495 | if (!ShiftAmtNode) |
| 12496 | return SDValue(); |
| 12497 | uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue()); |
| 12498 | ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1)); |
| 12499 | if (!AndMaskNode) |
| 12500 | return SDValue(); |
| 12501 | uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue()); |
| 12502 | // Don't transform uxtb/uxth. |
| 12503 | if (AndMask == 255 || AndMask == 65535) |
| 12504 | return SDValue(); |
| 12505 | if (isMask_32(AndMask)) { |
| 12506 | uint32_t MaskedBits = countLeadingZeros(AndMask); |
| 12507 | if (MaskedBits > ShiftAmt) { |
| 12508 | SDLoc DL(N); |
| 12509 | SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), |
| 12510 | DAG.getConstant(MaskedBits, DL, MVT::i32)); |
| 12511 | return DAG.getNode( |
| 12512 | ISD::SRL, DL, MVT::i32, SHL, |
| 12513 | DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32)); |
| 12514 | } |
| 12515 | } |
| 12516 | } |
| 12517 | |
| 12518 | // Nothing to be done for scalar shifts. |
| 12519 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12520 | if (!VT.isVector() || !TLI.isTypeLegal(VT)) |
| 12521 | return SDValue(); |
| 12522 | |
| 12523 | assert(ST->hasNEON() && "unexpected vector shift");
| 12524 | int64_t Cnt; |
| 12525 | |
| 12526 | switch (N->getOpcode()) { |
| 12527 | default: llvm_unreachable("unexpected shift opcode" ); |
| 12528 | |
| 12529 | case ISD::SHL: |
| 12530 | if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) { |
| 12531 | SDLoc dl(N); |
| 12532 | return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0), |
| 12533 | DAG.getConstant(Cnt, dl, MVT::i32)); |
| 12534 | } |
| 12535 | break; |
| 12536 | |
| 12537 | case ISD::SRA: |
| 12538 | case ISD::SRL: |
| 12539 | if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { |
| 12540 | unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? |
| 12541 | ARMISD::VSHRs : ARMISD::VSHRu); |
| 12542 | SDLoc dl(N); |
| 12543 | return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), |
| 12544 | DAG.getConstant(Cnt, dl, MVT::i32)); |
| 12545 | } |
| 12546 | } |
| 12547 | return SDValue(); |
| 12548 | } |
| 12549 | |
| 12550 | /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, |
| 12551 | /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. |
| 12552 | static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, |
| 12553 | const ARMSubtarget *ST) { |
| 12554 | SDValue N0 = N->getOperand(0); |
| 12555 | |
| 12556 | // Check for sign- and zero-extensions of vector extract operations of 8- |
| 12557 | // and 16-bit vector elements. NEON supports these directly. They are |
| 12558 | // handled during DAG combining because type legalization will promote them |
| 12559 | // to 32-bit types and it is messy to recognize the operations after that. |
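|       | // For example, roughly:
|       | //   (sext (extract_vector_elt <8 x i8> %v, 3))
|       | // becomes (VGETLANEs %v, 3), which can select to a single
|       | // "vmov.s8 r0, d0[3]" instead of an extract followed by a sign extend.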
| 12560 | if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 12561 | SDValue Vec = N0.getOperand(0); |
| 12562 | SDValue Lane = N0.getOperand(1); |
| 12563 | EVT VT = N->getValueType(0); |
| 12564 | EVT EltVT = N0.getValueType(); |
| 12565 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12566 | |
| 12567 | if (VT == MVT::i32 && |
| 12568 | (EltVT == MVT::i8 || EltVT == MVT::i16) && |
| 12569 | TLI.isTypeLegal(Vec.getValueType()) && |
| 12570 | isa<ConstantSDNode>(Lane)) { |
| 12571 | |
| 12572 | unsigned Opc = 0; |
| 12573 | switch (N->getOpcode()) { |
| 12574 | default: llvm_unreachable("unexpected opcode" ); |
| 12575 | case ISD::SIGN_EXTEND: |
| 12576 | Opc = ARMISD::VGETLANEs; |
| 12577 | break; |
| 12578 | case ISD::ZERO_EXTEND: |
| 12579 | case ISD::ANY_EXTEND: |
| 12580 | Opc = ARMISD::VGETLANEu; |
| 12581 | break; |
| 12582 | } |
| 12583 | return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane); |
| 12584 | } |
| 12585 | } |
| 12586 | |
| 12587 | return SDValue(); |
| 12588 | } |
| 12589 | |
| 12590 | static const APInt *isPowerOf2Constant(SDValue V) { |
| 12591 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); |
| 12592 | if (!C) |
| 12593 | return nullptr; |
| 12594 | const APInt *CV = &C->getAPIntValue(); |
| 12595 | return CV->isPowerOf2() ? CV : nullptr; |
| 12596 | } |
| 12597 | |
| 12598 | SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { |
| 12599 | // If we have a CMOV, OR and AND combination such as: |
| 12600 | // if (x & CN) |
| 12601 | // y |= CM; |
| 12602 | // |
| 12603 | // And: |
| 12604 | // * CN is a single bit; |
| 12605 | // * All bits covered by CM are known zero in y |
| 12606 | // |
| 12607 | // Then we can convert this into a sequence of BFI instructions. This will |
| 12608 | // always be a win if CM is a single bit, will always be no worse than the |
| 12609 | // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is |
| 12610 | // three bits (due to the extra IT instruction). |
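|       | // For example, roughly:
|       | //   if (x & 4)        // CN = 4, a single bit
|       | //     y |= 0x30;      // CM = 0x30; bits 4 and 5 are known zero in y
|       | // can become:
|       | //   lsr r2, r0, #2        ; move bit 2 of x down to bit 0
|       | //   bfi r1, r2, #4, #1    ; one BFI per bit set in CM
|       | //   bfi r1, r2, #5, #1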
| 12611 | |
| 12612 | SDValue Op0 = CMOV->getOperand(0); |
| 12613 | SDValue Op1 = CMOV->getOperand(1); |
| 12614 | auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2)); |
| 12615 | auto CC = CCNode->getAPIntValue().getLimitedValue(); |
| 12616 | SDValue CmpZ = CMOV->getOperand(4); |
| 12617 | |
| 12618 | // The compare must be against zero. |
| 12619 | if (!isNullConstant(CmpZ->getOperand(1))) |
| 12620 | return SDValue(); |
| 12621 | |
| 12622 | assert(CmpZ->getOpcode() == ARMISD::CMPZ); |
| 12623 | SDValue And = CmpZ->getOperand(0); |
| 12624 | if (And->getOpcode() != ISD::AND) |
| 12625 | return SDValue(); |
| 12626 | const APInt *AndC = isPowerOf2Constant(And->getOperand(1)); |
| 12627 | if (!AndC) |
| 12628 | return SDValue(); |
| 12629 | SDValue X = And->getOperand(0); |
| 12630 | |
| 12631 | if (CC == ARMCC::EQ) { |
| 12632 | // We're performing an "equal to zero" compare. Swap the operands so we |
| 12633 | // canonicalize on a "not equal to zero" compare. |
| 12634 | std::swap(Op0, Op1); |
| 12635 | } else { |
| 12636 | assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
| 12637 | } |
| 12638 | |
| 12639 | if (Op1->getOpcode() != ISD::OR) |
| 12640 | return SDValue(); |
| 12641 | |
| 12642 | ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1)); |
| 12643 | if (!OrC) |
| 12644 | return SDValue(); |
| 12645 | SDValue Y = Op1->getOperand(0); |
| 12646 | |
| 12647 | if (Op0 != Y) |
| 12648 | return SDValue(); |
| 12649 | |
| 12650 | // Now, is it profitable to continue? |
| 12651 | APInt OrCI = OrC->getAPIntValue(); |
| 12652 | unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; |
| 12653 | if (OrCI.countPopulation() > Heuristic) |
| 12654 | return SDValue(); |
| 12655 | |
| 12656 | // Lastly, can we determine that the bits defined by OrCI |
| 12657 | // are zero in Y? |
| 12658 | KnownBits Known = DAG.computeKnownBits(Y); |
| 12659 | if ((OrCI & Known.Zero) != OrCI) |
| 12660 | return SDValue(); |
| 12661 | |
| 12662 | // OK, we can do the combine. |
| 12663 | SDValue V = Y; |
| 12664 | SDLoc dl(X); |
| 12665 | EVT VT = X.getValueType(); |
| 12666 | unsigned BitInX = AndC->logBase2(); |
| 12667 | |
| 12668 | if (BitInX != 0) { |
| 12669 | // We must shift X first. |
| 12670 | X = DAG.getNode(ISD::SRL, dl, VT, X, |
| 12671 | DAG.getConstant(BitInX, dl, VT)); |
| 12672 | } |
| 12673 | |
| 12674 | for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); |
| 12675 | BitInY < NumActiveBits; ++BitInY) { |
| 12676 | if (OrCI[BitInY] == 0) |
| 12677 | continue; |
| 12678 | APInt Mask(VT.getSizeInBits(), 0); |
| 12679 | Mask.setBit(BitInY); |
| 12680 | V = DAG.getNode(ARMISD::BFI, dl, VT, V, X, |
| 12681 | // Confusingly, the operand is an *inverted* mask. |
| 12682 | DAG.getConstant(~Mask, dl, VT)); |
| 12683 | } |
| 12684 | |
| 12685 | return V; |
| 12686 | } |
| 12687 | |
| 12688 | /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. |
| 12689 | SDValue |
| 12690 | ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { |
| 12691 | SDValue Cmp = N->getOperand(4); |
| 12692 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 12693 | // Only looking at NE cases. |
| 12694 | return SDValue(); |
| 12695 | |
| 12696 | EVT VT = N->getValueType(0); |
| 12697 | SDLoc dl(N); |
| 12698 | SDValue LHS = Cmp.getOperand(0); |
| 12699 | SDValue RHS = Cmp.getOperand(1); |
| 12700 | SDValue Chain = N->getOperand(0); |
| 12701 | SDValue BB = N->getOperand(1); |
| 12702 | SDValue ARMcc = N->getOperand(2); |
| 12703 | ARMCC::CondCodes CC = |
| 12704 | (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); |
| 12705 | |
| 12706 | // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0)) |
| 12707 | // -> (brcond Chain BB CC CPSR Cmp) |
| 12708 | if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && |
| 12709 | LHS->getOperand(0)->getOpcode() == ARMISD::CMOV && |
| 12710 | LHS->getOperand(0)->hasOneUse()) { |
| 12711 | auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0)); |
| 12712 | auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1)); |
| 12713 | auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); |
| 12714 | auto *RHSC = dyn_cast<ConstantSDNode>(RHS); |
| 12715 | if ((LHS00C && LHS00C->getZExtValue() == 0) && |
| 12716 | (LHS01C && LHS01C->getZExtValue() == 1) && |
| 12717 | (LHS1C && LHS1C->getZExtValue() == 1) && |
| 12718 | (RHSC && RHSC->getZExtValue() == 0)) { |
| 12719 | return DAG.getNode( |
| 12720 | ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2), |
| 12721 | LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4)); |
| 12722 | } |
| 12723 | } |
| 12724 | |
| 12725 | return SDValue(); |
| 12726 | } |
| 12727 | |
| 12728 | /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. |
| 12729 | SDValue |
| 12730 | ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { |
| 12731 | SDValue Cmp = N->getOperand(4); |
| 12732 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 12733 | // Only looking at EQ and NE cases. |
| 12734 | return SDValue(); |
| 12735 | |
| 12736 | EVT VT = N->getValueType(0); |
| 12737 | SDLoc dl(N); |
| 12738 | SDValue LHS = Cmp.getOperand(0); |
| 12739 | SDValue RHS = Cmp.getOperand(1); |
| 12740 | SDValue FalseVal = N->getOperand(0); |
| 12741 | SDValue TrueVal = N->getOperand(1); |
| 12742 | SDValue ARMcc = N->getOperand(2); |
| 12743 | ARMCC::CondCodes CC = |
| 12744 | (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); |
| 12745 | |
| 12746 | // BFI is only available on V6T2+. |
| 12747 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { |
| 12748 | SDValue R = PerformCMOVToBFICombine(N, DAG); |
| 12749 | if (R) |
| 12750 | return R; |
| 12751 | } |
| 12752 | |
| 12753 | // Simplify |
| 12754 | // mov r1, r0 |
| 12755 | // cmp r1, x |
| 12756 | // mov r0, y |
| 12757 | // moveq r0, x |
| 12758 | // to |
| 12759 | // cmp r0, x |
| 12760 | // movne r0, y |
| 12761 | // |
| 12762 | // mov r1, r0 |
| 12763 | // cmp r1, x |
| 12764 | // mov r0, x |
| 12765 | // movne r0, y |
| 12766 | // to |
| 12767 | // cmp r0, x |
| 12768 | // movne r0, y |
| 12769 | // FIXME: Turn this into a target neutral optimization?
| 12770 | SDValue Res; |
| 12771 | if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { |
| 12772 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, |
| 12773 | N->getOperand(3), Cmp); |
| 12774 | } else if (CC == ARMCC::EQ && TrueVal == RHS) { |
| 12775 | SDValue ARMcc; |
| 12776 | SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); |
| 12777 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, |
| 12778 | N->getOperand(3), NewCmp); |
| 12779 | } |
| 12780 | |
| 12781 | // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0)) |
| 12782 | // -> (cmov F T CC CPSR Cmp) |
| 12783 | if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) { |
| 12784 | auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)); |
| 12785 | auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); |
| 12786 | auto *RHSC = dyn_cast<ConstantSDNode>(RHS); |
| 12787 | if ((LHS0C && LHS0C->getZExtValue() == 0) && |
| 12788 | (LHS1C && LHS1C->getZExtValue() == 1) && |
| 12789 | (RHSC && RHSC->getZExtValue() == 0)) { |
| 12790 | return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, |
| 12791 | LHS->getOperand(2), LHS->getOperand(3), |
| 12792 | LHS->getOperand(4)); |
| 12793 | } |
| 12794 | } |
| 12795 | |
| 12796 | if (!VT.isInteger()) |
| 12797 | return SDValue(); |
| 12798 | |
| 12799 | // Materialize a boolean comparison for integers so we can avoid branching. |
| 12800 | if (isNullConstant(FalseVal)) { |
| 12801 | if (CC == ARMCC::EQ && isOneConstant(TrueVal)) { |
| 12802 | if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { |
| 12803 | // If x == y then x - y == 0 and ARM's CLZ will return 32; shifting
| 12804 | // that right by 5 bits yields 1, otherwise it yields 0.
| 12805 | // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5
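|       | // For example, "r0 = (r1 == r2)" becomes, roughly:
|       | //   sub r0, r1, r2
|       | //   clz r0, r0
|       | //   lsr r0, r0, #5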
| 12806 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); |
| 12807 | Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub), |
| 12808 | DAG.getConstant(5, dl, MVT::i32)); |
| 12809 | } else { |
| 12810 | // CMOV 0, 1, ==, (CMPZ x, y) -> |
| 12811 | // (ADDCARRY (SUB x, y), t:0, t:1) |
| 12812 | // where t = (SUBCARRY 0, (SUB x, y), 0) |
| 12813 | // |
| 12814 | // The SUBCARRY computes 0 - (x - y) and this will give a borrow when |
| 12815 | // x != y. In other words, a carry C == 1 when x == y, C == 0 |
| 12816 | // otherwise. |
| 12817 | // The final ADDCARRY computes |
| 12818 | // x - y + (0 - (x - y)) + C == C |
| 12819 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); |
| 12820 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
| 12821 | SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub); |
| 12822 | // ISD::USUBO returns a borrow, but we actually want the carry here,
| 12823 | // so compute it as 1 - borrow.
| 12824 | SDValue Carry = |
| 12825 | DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 12826 | DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1)); |
| 12827 | Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry); |
| 12828 | } |
| 12829 | } else if (CC == ARMCC::NE && !isNullConstant(RHS) && |
| 12830 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) { |
| 12831 | // This seems pointless but will allow us to combine it further below. |
| 12832 | // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1 |
| 12833 | SDValue Sub = |
| 12834 | DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS); |
| 12835 | SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, |
| 12836 | Sub.getValue(1), SDValue()); |
| 12837 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc, |
| 12838 | N->getOperand(3), CPSRGlue.getValue(1)); |
| 12839 | FalseVal = Sub; |
| 12840 | } |
| 12841 | } else if (isNullConstant(TrueVal)) { |
| 12842 | if (CC == ARMCC::EQ && !isNullConstant(RHS) && |
| 12843 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) { |
| 12844 | // This seems pointless but will allow us to combine it further below |
| 12845 | // Note that we change == for != as this is the dual for the case above. |
| 12846 | // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1 |
| 12847 | SDValue Sub = |
| 12848 | DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS); |
| 12849 | SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, |
| 12850 | Sub.getValue(1), SDValue()); |
| 12851 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal, |
| 12852 | DAG.getConstant(ARMCC::NE, dl, MVT::i32), |
| 12853 | N->getOperand(3), CPSRGlue.getValue(1)); |
| 12854 | FalseVal = Sub; |
| 12855 | } |
| 12856 | } |
| 12857 | |
| 12858 | // On Thumb1, the DAG above may be further combined if z is a power of 2 |
| 12859 | // (z == 2 ^ K). |
| 12860 | // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 -> |
| 12861 | // t1 = (USUBO (SUB x, y), 1) |
| 12862 | // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1) |
| 12863 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
| 12864 | // |
| 12865 | // This also handles the special case of comparing against zero; it's |
| 12866 | // essentially the same pattern, except there's no SUBS:
| 12867 | // CMOV x, z, !=, (CMPZ x, 0) -> |
| 12868 | // t1 = (USUBO x, 1) |
| 12869 | // t2 = (SUBCARRY x, t1:0, t1:1) |
| 12870 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
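|       | // For example, "r0 = (r1 != r2) ? 8 : 0" (K == 3) becomes, roughly:
|       | //   subs r0, r1, r2     ; x - y
|       | //   subs r3, r0, #1     ; carry clear (borrow) iff x - y == 0
|       | //   sbcs r0, r3         ; r0 = (x - y) - (x - y - 1) - borrow = (x != y)
|       | //   lsls r0, r0, #3     ; multiply by 8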
| 12871 | const APInt *TrueConst; |
| 12872 | if (Subtarget->isThumb1Only() && CC == ARMCC::NE && |
| 12873 | ((FalseVal.getOpcode() == ARMISD::SUBS && |
| 12874 | FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) || |
| 12875 | (FalseVal == LHS && isNullConstant(RHS))) && |
| 12876 | (TrueConst = isPowerOf2Constant(TrueVal))) { |
| 12877 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
| 12878 | unsigned ShiftAmount = TrueConst->logBase2(); |
| 12879 | if (ShiftAmount) |
| 12880 | TrueVal = DAG.getConstant(1, dl, VT); |
| 12881 | SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal); |
| 12882 | Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1)); |
| 12883 | |
| 12884 | if (ShiftAmount) |
| 12885 | Res = DAG.getNode(ISD::SHL, dl, VT, Res, |
| 12886 | DAG.getConstant(ShiftAmount, dl, MVT::i32)); |
| 12887 | } |
| 12888 | |
| 12889 | if (Res.getNode()) { |
| 12890 | KnownBits Known = DAG.computeKnownBits(SDValue(N, 0));
| 12891 | // Capture demanded bits information that would be otherwise lost. |
| 12892 | if (Known.Zero == 0xfffffffe) |
| 12893 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, |
| 12894 | DAG.getValueType(MVT::i1)); |
| 12895 | else if (Known.Zero == 0xffffff00) |
| 12896 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, |
| 12897 | DAG.getValueType(MVT::i8)); |
| 12898 | else if (Known.Zero == 0xffff0000) |
| 12899 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, |
| 12900 | DAG.getValueType(MVT::i16)); |
| 12901 | } |
| 12902 | |
| 12903 | return Res; |
| 12904 | } |
| 12905 | |
| 12906 | SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, |
| 12907 | DAGCombinerInfo &DCI) const { |
| 12908 | switch (N->getOpcode()) { |
| 12909 | default: break; |
| 12910 | case ISD::ABS: return PerformABSCombine(N, DCI, Subtarget); |
| 12911 | case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); |
| 12912 | case ARMISD::UMLAL: return PerformUMLALCombine(N, DCI.DAG, Subtarget); |
| 12913 | case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); |
| 12914 | case ISD::SUB: return PerformSUBCombine(N, DCI); |
| 12915 | case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); |
| 12916 | case ISD::OR: return PerformORCombine(N, DCI, Subtarget); |
| 12917 | case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); |
| 12918 | case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); |
| 12919 | case ARMISD::ADDC: |
| 12920 | case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); |
| 12921 | case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 12922 | case ARMISD::BFI: return PerformBFICombine(N, DCI); |
| 12923 | case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); |
| 12924 | case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); |
| 12925 | case ISD::STORE: return PerformSTORECombine(N, DCI); |
| 12926 | case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); |
| 12927 | case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); |
| 12928 | case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); |
| 12929 | case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); |
| 12930 | case ARMISD::VDUP: return PerformVDUPCombine(N, DCI); |
| 12931 | case ISD::FP_TO_SINT: |
| 12932 | case ISD::FP_TO_UINT: |
| 12933 | return PerformVCVTCombine(N, DCI.DAG, Subtarget); |
| 12934 | case ISD::FDIV: |
| 12935 | return PerformVDIVCombine(N, DCI.DAG, Subtarget); |
| 12936 | case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); |
| 12937 | case ISD::SHL: |
| 12938 | case ISD::SRA: |
| 12939 | case ISD::SRL: |
| 12940 | return PerformShiftCombine(N, DCI, Subtarget); |
| 12941 | case ISD::SIGN_EXTEND: |
| 12942 | case ISD::ZERO_EXTEND: |
| 12943 | case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); |
| 12944 | case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); |
| 12945 | case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG); |
| 12946 | case ISD::LOAD: return PerformLOADCombine(N, DCI); |
| 12947 | case ARMISD::VLD1DUP: |
| 12948 | case ARMISD::VLD2DUP: |
| 12949 | case ARMISD::VLD3DUP: |
| 12950 | case ARMISD::VLD4DUP: |
| 12951 | return PerformVLDCombine(N, DCI); |
| 12952 | case ARMISD::BUILD_VECTOR: |
| 12953 | return PerformARMBUILD_VECTORCombine(N, DCI); |
| 12954 | case ARMISD::SMULWB: { |
| 12955 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
| 12956 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); |
| 12957 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) |
| 12958 | return SDValue(); |
| 12959 | break; |
| 12960 | } |
| 12961 | case ARMISD::SMULWT: { |
| 12962 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
| 12963 | APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); |
| 12964 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) |
| 12965 | return SDValue(); |
| 12966 | break; |
| 12967 | } |
| 12968 | case ARMISD::SMLALBB: { |
| 12969 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
| 12970 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); |
| 12971 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || |
| 12972 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) |
| 12973 | return SDValue(); |
| 12974 | break; |
| 12975 | } |
| 12976 | case ARMISD::SMLALBT: { |
| 12977 | unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits(); |
| 12978 | APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); |
| 12979 | unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits(); |
| 12980 | APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); |
| 12981 | if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) || |
| 12982 | (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI))) |
| 12983 | return SDValue(); |
| 12984 | break; |
| 12985 | } |
| 12986 | case ARMISD::SMLALTB: { |
| 12987 | unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits(); |
| 12988 | APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); |
| 12989 | unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits(); |
| 12990 | APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); |
| 12991 | if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) || |
| 12992 | (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI))) |
| 12993 | return SDValue(); |
| 12994 | break; |
| 12995 | } |
| 12996 | case ARMISD::SMLALTT: { |
| 12997 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
| 12998 | APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); |
| 12999 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || |
| 13000 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) |
| 13001 | return SDValue(); |
| 13002 | break; |
| 13003 | } |
| 13004 | case ISD::INTRINSIC_VOID: |
| 13005 | case ISD::INTRINSIC_W_CHAIN: |
| 13006 | switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { |
| 13007 | case Intrinsic::arm_neon_vld1: |
| 13008 | case Intrinsic::arm_neon_vld1x2: |
| 13009 | case Intrinsic::arm_neon_vld1x3: |
| 13010 | case Intrinsic::arm_neon_vld1x4: |
| 13011 | case Intrinsic::arm_neon_vld2: |
| 13012 | case Intrinsic::arm_neon_vld3: |
| 13013 | case Intrinsic::arm_neon_vld4: |
| 13014 | case Intrinsic::arm_neon_vld2lane: |
| 13015 | case Intrinsic::arm_neon_vld3lane: |
| 13016 | case Intrinsic::arm_neon_vld4lane: |
| 13017 | case Intrinsic::arm_neon_vld2dup: |
| 13018 | case Intrinsic::arm_neon_vld3dup: |
| 13019 | case Intrinsic::arm_neon_vld4dup: |
| 13020 | case Intrinsic::arm_neon_vst1: |
| 13021 | case Intrinsic::arm_neon_vst1x2: |
| 13022 | case Intrinsic::arm_neon_vst1x3: |
| 13023 | case Intrinsic::arm_neon_vst1x4: |
| 13024 | case Intrinsic::arm_neon_vst2: |
| 13025 | case Intrinsic::arm_neon_vst3: |
| 13026 | case Intrinsic::arm_neon_vst4: |
| 13027 | case Intrinsic::arm_neon_vst2lane: |
| 13028 | case Intrinsic::arm_neon_vst3lane: |
| 13029 | case Intrinsic::arm_neon_vst4lane: |
| 13030 | return PerformVLDCombine(N, DCI); |
| 13031 | default: break; |
| 13032 | } |
| 13033 | break; |
| 13034 | } |
| 13035 | return SDValue(); |
| 13036 | } |
| 13037 | |
| 13038 | bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, |
| 13039 | EVT VT) const { |
| 13040 | return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); |
| 13041 | } |
| 13042 | |
| 13043 | bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, |
| 13044 | unsigned, |
| 13045 | MachineMemOperand::Flags, |
| 13046 | bool *Fast) const { |
| 13047 | // Depends what it gets converted into if the type is weird. |
| 13048 | if (!VT.isSimple()) |
| 13049 | return false; |
| 13050 | |
| 13051 | // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
| 13052 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
| 13053 | |
| 13054 | switch (VT.getSimpleVT().SimpleTy) { |
| 13055 | default: |
| 13056 | return false; |
| 13057 | case MVT::i8: |
| 13058 | case MVT::i16: |
| 13059 | case MVT::i32: { |
| 13060 | // Unaligned access can use (for example) LDRB, LDRH, LDR.
| 13061 | if (AllowsUnaligned) { |
| 13062 | if (Fast) |
| 13063 | *Fast = Subtarget->hasV7Ops(); |
| 13064 | return true; |
| 13065 | } |
| 13066 | return false; |
| 13067 | } |
| 13068 | case MVT::f64: |
| 13069 | case MVT::v2f64: { |
| 13070 | // For any little-endian targets with NEON, we can support unaligned ld/st
| 13071 | // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
| 13072 | // A big-endian target may also explicitly support unaligned accesses.
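|       | // For example, "vld1.8 {d0, d1}, [r0]" loads 16 bytes with no alignment
|       | // requirement beyond the 1-byte element size.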
| 13073 | if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { |
| 13074 | if (Fast) |
| 13075 | *Fast = true; |
| 13076 | return true; |
| 13077 | } |
| 13078 | return false; |
| 13079 | } |
| 13080 | } |
| 13081 | } |
| 13082 | |
| 13083 | static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, |
| 13084 | unsigned AlignCheck) { |
| 13085 | return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) && |
| 13086 | (DstAlign == 0 || DstAlign % AlignCheck == 0)); |
| 13087 | } |
| 13088 | |
| 13089 | EVT ARMTargetLowering::getOptimalMemOpType( |
| 13090 | uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, |
| 13091 | bool ZeroMemset, bool MemcpyStrSrc, |
| 13092 | const AttributeList &FuncAttributes) const { |
| 13093 | // See if we can use NEON instructions for this... |
| 13094 | if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() && |
| 13095 | !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) { |
| 13096 | bool Fast; |
| 13097 | if (Size >= 16 && |
| 13098 | (memOpAlign(SrcAlign, DstAlign, 16) || |
| 13099 | (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, |
| 13100 | MachineMemOperand::MONone, &Fast) && |
| 13101 | Fast))) { |
| 13102 | return MVT::v2f64; |
| 13103 | } else if (Size >= 8 && |
| 13104 | (memOpAlign(SrcAlign, DstAlign, 8) || |
| 13105 | (allowsMisalignedMemoryAccesses( |
| 13106 | MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) && |
| 13107 | Fast))) { |
| 13108 | return MVT::f64; |
| 13109 | } |
| 13110 | } |
| 13111 | |
| 13112 | // Let the target-independent logic figure it out. |
| 13113 | return MVT::Other; |
| 13114 | } |
| 13115 | |
| 13116 | // 64-bit integers are split into their high and low parts and held in two |
| 13117 | // different registers, so the trunc is free since the low register can just |
| 13118 | // be used. |
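|       | // For example, on little-endian targets an i64 held in {r0, r1} truncates
|       | // to i32 by simply using r0, the low half, so no instruction is needed.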
| 13119 | bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { |
| 13120 | if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) |
| 13121 | return false; |
| 13122 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); |
| 13123 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); |
| 13124 | return (SrcBits == 64 && DestBits == 32); |
| 13125 | } |
| 13126 | |
| 13127 | bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { |
| 13128 | if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() || |
| 13129 | !DstVT.isInteger()) |
| 13130 | return false; |
| 13131 | unsigned SrcBits = SrcVT.getSizeInBits(); |
| 13132 | unsigned DestBits = DstVT.getSizeInBits(); |
| 13133 | return (SrcBits == 64 && DestBits == 32); |
| 13134 | } |
| 13135 | |
| 13136 | bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { |
| 13137 | if (Val.getOpcode() != ISD::LOAD) |
| 13138 | return false; |
| 13139 | |
| 13140 | EVT VT1 = Val.getValueType(); |
| 13141 | if (!VT1.isSimple() || !VT1.isInteger() || |
| 13142 | !VT2.isSimple() || !VT2.isInteger()) |
| 13143 | return false; |
| 13144 | |
| 13145 | switch (VT1.getSimpleVT().SimpleTy) { |
| 13146 | default: break; |
| 13147 | case MVT::i1: |
| 13148 | case MVT::i8: |
| 13149 | case MVT::i16: |
| 13150 | // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. |
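|       | // For example, LDRB and LDRH zero the upper bits of the destination
|       | // register, so no separate zero-extension instruction is needed.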
| 13151 | return true; |
| 13152 | } |
| 13153 | |
| 13154 | return false; |
| 13155 | } |
| 13156 | |
| 13157 | bool ARMTargetLowering::isFNegFree(EVT VT) const { |
| 13158 | if (!VT.isSimple()) |
| 13159 | return false; |
| 13160 | |
| 13161 | // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that |
| 13162 | // negate values directly (fneg is free). So, we don't want to let the DAG |
| 13163 | // combiner rewrite fneg into xors and some other instructions. For f16 and |
| 13164 | // FullFP16 argument passing, some bitcast nodes may be introduced, |
| 13165 | // triggering this DAG combine rewrite, so we are avoiding that with this. |
| 13166 | switch (VT.getSimpleVT().SimpleTy) { |
| 13167 | default: break; |
| 13168 | case MVT::f16: |
| 13169 | return Subtarget->hasFullFP16(); |
| 13170 | } |
| 13171 | |
| 13172 | return false; |
| 13173 | } |
| 13174 | |
| 13175 | /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth |
| 13176 | /// of the vector elements. |
| 13177 | static bool areExtractExts(Value *Ext1, Value *Ext2) {
| 13178 | auto areExtDoubled = [](Instruction *Ext) { |
| 13179 | return Ext->getType()->getScalarSizeInBits() == |
| 13180 | 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits(); |
| 13181 | }; |
| 13182 | |
| 13183 | if (!match(Ext1, m_ZExtOrSExt(m_Value())) || |
| 13184 | !match(Ext2, m_ZExtOrSExt(m_Value())) || |
| 13185 | !areExtDoubled(cast<Instruction>(Ext1)) || |
| 13186 | !areExtDoubled(cast<Instruction>(Ext2))) |
| 13187 | return false; |
| 13188 | |
| 13189 | return true; |
| 13190 | } |
| 13191 | |
| 13192 | /// Check if sinking \p I's operands to I's basic block is profitable, because |
| 13193 | /// the operands can be folded into a target instruction, e.g. |
| 13194 | /// sext/zext can be folded into vsubl. |
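|       | /// For example, roughly:
|       | ///   %e1 = zext <8 x i8> %a to <8 x i16>   ; defined in another block
|       | ///   %e2 = zext <8 x i8> %b to <8 x i16>
|       | ///   %s  = sub <8 x i16> %e1, %e2
|       | /// Sinking both zexts next to the sub lets instruction selection fold the
|       | /// three operations into a single VSUBL.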
| 13195 | bool ARMTargetLowering::shouldSinkOperands(Instruction *I, |
| 13196 | SmallVectorImpl<Use *> &Ops) const { |
| 13197 | if (!Subtarget->hasNEON() || !I->getType()->isVectorTy()) |
| 13198 | return false; |
| 13199 | |
| 13200 | switch (I->getOpcode()) { |
| 13201 | case Instruction::Sub: |
| 13202 | case Instruction::Add: { |
| 13203 | if (!areExtractExts(I->getOperand(0), I->getOperand(1))) |
| 13204 | return false; |
| 13205 | Ops.push_back(&I->getOperandUse(0)); |
| 13206 | Ops.push_back(&I->getOperandUse(1)); |
| 13207 | return true; |
| 13208 | } |
| 13209 | default: |
| 13210 | return false; |
| 13211 | } |
| 13212 | return false; |
| 13213 | } |
| 13214 | |
| 13215 | bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { |
| 13216 | EVT VT = ExtVal.getValueType(); |
| 13217 | |
| 13218 | if (!isTypeLegal(VT)) |
| 13219 | return false; |
| 13220 | |
| 13221 | // Don't create a loadext if we can fold the extension into a wide/long |
| 13222 | // instruction. |
| 13223 | // If there's more than one user instruction, the loadext is desirable no |
| 13224 | // matter what. There can be two uses by the same instruction. |
| 13225 | if (ExtVal->use_empty() || |
| 13226 | !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode())) |
| 13227 | return true; |
| 13228 | |
| 13229 | SDNode *U = *ExtVal->use_begin(); |
| 13230 | if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || |
| 13231 | U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL)) |
| 13232 | return false; |
| 13233 | |
| 13234 | return true; |
| 13235 | } |
| 13236 | |
| 13237 | bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { |
| 13238 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) |
| 13239 | return false; |
| 13240 | |
| 13241 | if (!isTypeLegal(EVT::getEVT(Ty1))) |
| 13242 | return false; |
| 13243 | |
| 13244 | assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
| 13245 | |
| 13246 | // Assuming the caller doesn't have a zeroext or signext return parameter, |
| 13247 | // truncation all the way down to i1 is valid. |
| 13248 | return true; |
| 13249 | } |
| 13250 | |
| 13251 | int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL, |
| 13252 | const AddrMode &AM, Type *Ty, |
| 13253 | unsigned AS) const { |
| 13254 | if (isLegalAddressingMode(DL, AM, Ty, AS)) { |
| 13255 | if (Subtarget->hasFPAO()) |
| 13256 | return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster |
| 13257 | return 0; |
| 13258 | } |
| 13259 | return -1; |
| 13260 | } |
| 13261 | |
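|       | // Thumb1 load/store offsets are an unsigned imm5 scaled by the access
|       | // size; e.g. "ldr r0, [r1, #124]" (124 == 31 * 4) is encodable, but
|       | // "ldr r0, [r1, #128]" is not.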
| 13262 | static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { |
| 13263 | if (V < 0) |
| 13264 | return false; |
| 13265 | |
| 13266 | unsigned Scale = 1; |
| 13267 | switch (VT.getSimpleVT().SimpleTy) { |
| 13268 | case MVT::i1: |
| 13269 | case MVT::i8: |
| 13270 | // Scale == 1; |
| 13271 | break; |
| 13272 | case MVT::i16: |
| 13273 | // Scale == 2; |
| 13274 | Scale = 2; |
| 13275 | break; |
| 13276 | default: |
| 13277 | // On thumb1 we load most things (i32, i64, floats, etc) with a LDR |
| 13278 | // Scale == 4; |
| 13279 | Scale = 4; |
| 13280 | break; |
| 13281 | } |
| 13282 | |
| 13283 | if ((V & (Scale - 1)) != 0) |
| 13284 | return false; |
| 13285 | return isUInt<5>(V / Scale); |
| 13286 | } |
| 13287 | |
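|       | // Thumb2 integer loads/stores take +imm12 or -imm8 offsets, and VLDR/LDRD
|       | // take 4 * imm8; e.g. "ldr r0, [r1, #4095]" and "ldr r0, [r1, #-255]" are
|       | // encodable, but "ldr r0, [r1, #-256]" is not.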
| 13288 | static bool isLegalT2AddressImmediate(int64_t V, EVT VT, |
| 13289 | const ARMSubtarget *Subtarget) { |
| 13290 | if (!VT.isInteger() && !VT.isFloatingPoint()) |
| 13291 | return false; |
| 13292 | if (VT.isVector() && Subtarget->hasNEON()) |
| 13293 | return false; |
| 13294 | if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() && |
| 13295 | !Subtarget->hasMVEFloatOps()) |
| 13296 | return false; |
| 13297 | |
| 13298 | bool IsNeg = false; |
| 13299 | if (V < 0) { |
| 13300 | IsNeg = true; |
| 13301 | V = -V; |
| 13302 | } |
| 13303 | |
| 13304 | unsigned NumBytes = std::max(VT.getSizeInBits() / 8, 1U); |
| 13305 | |
| 13306 | // MVE: size * imm7 |
| 13307 | if (VT.isVector() && Subtarget->hasMVEIntegerOps()) { |
| 13308 | switch (VT.getSimpleVT().getVectorElementType().SimpleTy) { |
| 13309 | case MVT::i32: |
| 13310 | case MVT::f32: |
| 13311 | return isShiftedUInt<7,2>(V); |
| 13312 | case MVT::i16: |
| 13313 | case MVT::f16: |
| 13314 | return isShiftedUInt<7,1>(V); |
| 13315 | case MVT::i8: |
| 13316 | return isUInt<7>(V); |
| 13317 | default: |
| 13318 | return false; |
| 13319 | } |
| 13320 | } |
| 13321 | |
| 13322 | // half VLDR: 2 * imm8 |
| 13323 | if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16()) |
| 13324 | return isShiftedUInt<8, 1>(V); |
| 13325 | // VLDR and LDRD: 4 * imm8 |
| 13326 | if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8) |
| 13327 | return isShiftedUInt<8, 2>(V); |
| 13328 | |
| 13329 | if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) { |
| 13330 | // + imm12 or - imm8 |
| 13331 | if (IsNeg) |
| 13332 | return isUInt<8>(V); |
| 13333 | return isUInt<12>(V); |
| 13334 | } |
| 13335 | |
| 13336 | return false; |
| 13337 | } |
| 13338 | |
| 13339 | /// isLegalAddressImmediate - Return true if the integer value can be used |
| 13340 | /// as the offset of the target addressing mode for load / store of the |
| 13341 | /// given type. |
| 13342 | static bool isLegalAddressImmediate(int64_t V, EVT VT, |
| 13343 | const ARMSubtarget *Subtarget) { |
| 13344 | if (V == 0) |
| 13345 | return true; |
| 13346 | |
| 13347 | if (!VT.isSimple()) |
| 13348 | return false; |
| 13349 | |
| 13350 | if (Subtarget->isThumb1Only()) |
| 13351 | return isLegalT1AddressImmediate(V, VT); |
| 13352 | else if (Subtarget->isThumb2()) |
| 13353 | return isLegalT2AddressImmediate(V, VT, Subtarget); |
| 13354 | |
| 13355 | // ARM mode. |
| 13356 | if (V < 0) |
| 13357 | V = -V;
| 13358 | switch (VT.getSimpleVT().SimpleTy) { |
| 13359 | default: return false; |
| 13360 | case MVT::i1: |
| 13361 | case MVT::i8: |
| 13362 | case MVT::i32: |
| 13363 | // +- imm12 |
| 13364 | return isUInt<12>(V); |
| 13365 | case MVT::i16: |
| 13366 | // +- imm8 |
| 13367 | return isUInt<8>(V); |
| 13368 | case MVT::f32: |
| 13369 | case MVT::f64: |
| 13370 | if (!Subtarget->hasVFP2Base()) // FIXME: NEON? |
| 13371 | return false; |
| 13372 | return isShiftedUInt<8, 2>(V); |
| 13373 | } |
| 13374 | } |
| 13375 | |
| 13376 | bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, |
| 13377 | EVT VT) const { |
| 13378 | int Scale = AM.Scale; |
| 13379 | if (Scale < 0) |
| 13380 | return false; |
| 13381 | |
| 13382 | switch (VT.getSimpleVT().SimpleTy) { |
| 13383 | default: return false; |
| 13384 | case MVT::i1: |
| 13385 | case MVT::i8: |
| 13386 | case MVT::i16: |
| 13387 | case MVT::i32: |
| 13388 | if (Scale == 1) |
| 13389 | return true; |
| 13390 | // r + r << imm |
| 13391 | Scale = Scale & ~1; |
| 13392 | return Scale == 2 || Scale == 4 || Scale == 8; |
| 13393 | case MVT::i64: |
| 13394 | // FIXME: What are we trying to model here? ldrd doesn't have an r + r |
| 13395 | // version in Thumb mode. |
| 13396 | // r + r |
| 13397 | if (Scale == 1) |
| 13398 | return true; |
| 13399 | // r * 2 (this can be lowered to r + r). |
| 13400 | if (!AM.HasBaseReg && Scale == 2) |
| 13401 | return true; |
| 13402 | return false; |
| 13403 | case MVT::isVoid: |
| 13404 | // Note, we allow "void" uses (basically, uses that aren't loads or |
| 13405 | // stores), because arm allows folding a scale into many arithmetic |
| 13406 | // operations. This should be made more precise and revisited later. |
| 13407 | |
| 13408 | // Allow r << imm, but the imm has to be a multiple of two. |
| 13409 | if (Scale & 1) return false; |
| 13410 | return isPowerOf2_32(Scale); |
| 13411 | } |
| 13412 | } |
| 13413 | |
| 13414 | bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM, |
| 13415 | EVT VT) const { |
| 13416 | const int Scale = AM.Scale; |
| 13417 | |
| 13418 | // Negative scales are not supported in Thumb1. |
| 13419 | if (Scale < 0) |
| 13420 | return false; |
| 13421 | |
| 13422 | // Thumb1 addressing modes do not support register scaling, except in the
| 13423 | // following cases:
| 13424 | // 1. Scale == 1 means no scaling. |
| 13425 | // 2. Scale == 2 this can be lowered to r + r if there is no base register. |
| 13426 | return (Scale == 1) || (!AM.HasBaseReg && Scale == 2); |
| 13427 | } |
| 13428 | |
| 13429 | /// isLegalAddressingMode - Return true if the addressing mode represented |
| 13430 | /// by AM is legal for this target, for a load/store of the specified type. |
| 13431 | bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, |
| 13432 | const AddrMode &AM, Type *Ty, |
| 13433 | unsigned AS, Instruction *I) const { |
| 13434 | EVT VT = getValueType(DL, Ty, true); |
| 13435 | if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) |
| 13436 | return false; |
| 13437 | |
| 13438 | // Can never fold addr of global into load/store. |
| 13439 | if (AM.BaseGV) |
| 13440 | return false; |
| 13441 | |
| 13442 | switch (AM.Scale) { |
| 13443 | case 0: // no scale reg, must be "r+i" or "r", or "i". |
| 13444 | break; |
| 13445 | default: |
| 13446 | // ARM doesn't support any R+R*scale+imm addr modes. |
| 13447 | if (AM.BaseOffs) |
| 13448 | return false; |
| 13449 | |
| 13450 | if (!VT.isSimple()) |
| 13451 | return false; |
| 13452 | |
| 13453 | if (Subtarget->isThumb1Only()) |
| 13454 | return isLegalT1ScaledAddressingMode(AM, VT); |
| 13455 | |
| 13456 | if (Subtarget->isThumb2()) |
| 13457 | return isLegalT2ScaledAddressingMode(AM, VT); |
| 13458 | |
| 13459 | int Scale = AM.Scale; |
| 13460 | switch (VT.getSimpleVT().SimpleTy) { |
| 13461 | default: return false; |
| 13462 | case MVT::i1: |
| 13463 | case MVT::i8: |
| 13464 | case MVT::i32: |
| 13465 | if (Scale < 0) Scale = -Scale; |
| 13466 | if (Scale == 1) |
| 13467 | return true; |
| 13468 | // r + r << imm |
| 13469 | return isPowerOf2_32(Scale & ~1); |
| 13470 | case MVT::i16: |
| 13471 | case MVT::i64: |
| 13472 | // r +/- r |
| 13473 | if (Scale == 1 || (AM.HasBaseReg && Scale == -1)) |
| 13474 | return true; |
| 13475 | // r * 2 (this can be lowered to r + r). |
| 13476 | if (!AM.HasBaseReg && Scale == 2) |
| 13477 | return true; |
| 13478 | return false; |
| 13479 | |
| 13480 | case MVT::isVoid: |
| 13481 | // Note, we allow "void" uses (basically, uses that aren't loads or |
| 13482 | // stores), because arm allows folding a scale into many arithmetic |
| 13483 | // operations. This should be made more precise and revisited later. |
| 13484 | |
| 13485 | // Allow r << imm, but the imm has to be a multiple of two. |
| 13486 | if (Scale & 1) return false; |
| 13487 | return isPowerOf2_32(Scale); |
| 13488 | } |
| 13489 | } |
| 13490 | return true; |
| 13491 | } |
| 13492 | |
| 13493 | /// isLegalICmpImmediate - Return true if the specified immediate is legal |
| 13494 | /// icmp immediate, that is the target has icmp instructions which can compare |
| 13495 | /// a register against the immediate without having to materialize the |
| 13496 | /// immediate into a register. |
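|       | // For example, "cmp r0, #255" is encodable even in Thumb1, while comparing
|       | // against -1 requires ARM or Thumb2, where it can be done as "cmn r0, #1".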
| 13497 | bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { |
| 13498 | // Thumb2 and ARM modes can use cmn for negative immediates. |
| 13499 | if (!Subtarget->isThumb()) |
| 13500 | return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 || |
| 13501 | ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1; |
| 13502 | if (Subtarget->isThumb2()) |
| 13503 | return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 || |
| 13504 | ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1; |
| 13505 | // Thumb1 doesn't have cmn, and supports only 8-bit unsigned immediates.
| 13506 | return Imm >= 0 && Imm <= 255; |
| 13507 | } |
| 13508 | |
| 13509 | /// isLegalAddImmediate - Return true if the specified immediate is a legal add |
| 13510 | /// *or sub* immediate, that is the target has add or sub instructions which can |
| 13511 | /// add a register with the immediate without having to materialize the |
| 13512 | /// immediate into a register. |
| 13513 | bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { |
| 13514 | // Same encoding for add/sub, just flip the sign. |
| 13515 | int64_t AbsImm = std::abs(Imm); |
| 13516 | if (!Subtarget->isThumb()) |
| 13517 | return ARM_AM::getSOImmVal(AbsImm) != -1; |
| 13518 | if (Subtarget->isThumb2()) |
| 13519 | return ARM_AM::getT2SOImmVal(AbsImm) != -1; |
| 13520 | // Thumb1 only has 8-bit unsigned immediates.
| 13521 | return AbsImm >= 0 && AbsImm <= 255; |
| 13522 | } |
| 13523 | |
| 13524 | static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, |
| 13525 | bool isSEXTLoad, SDValue &Base, |
| 13526 | SDValue &Offset, bool &isInc, |
| 13527 | SelectionDAG &DAG) { |
| 13528 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 13529 | return false; |
| 13530 | |
| 13531 | if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { |
| 13532 | // AddressingMode 3 |
| 13533 | Base = Ptr->getOperand(0); |
| 13534 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { |
| 13535 | int RHSC = (int)RHS->getZExtValue(); |
| 13536 | if (RHSC < 0 && RHSC > -256) { |
| 13537 | assert(Ptr->getOpcode() == ISD::ADD); |
| 13538 | isInc = false; |
| 13539 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 13540 | return true; |
| 13541 | } |
| 13542 | } |
| 13543 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 13544 | Offset = Ptr->getOperand(1); |
| 13545 | return true; |
| 13546 | } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { |
| 13547 | // AddressingMode 2 |
| 13548 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { |
| 13549 | int RHSC = (int)RHS->getZExtValue(); |
| 13550 | if (RHSC < 0 && RHSC > -0x1000) { |
| 13551 | assert(Ptr->getOpcode() == ISD::ADD); |
| 13552 | isInc = false; |
| 13553 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 13554 | Base = Ptr->getOperand(0); |
| 13555 | return true; |
| 13556 | } |
| 13557 | } |
| 13558 | |
| 13559 | if (Ptr->getOpcode() == ISD::ADD) { |
| 13560 | isInc = true; |
| 13561 | ARM_AM::ShiftOpc ShOpcVal =
| 13562 | ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); |
| 13563 | if (ShOpcVal != ARM_AM::no_shift) { |
| 13564 | Base = Ptr->getOperand(1); |
| 13565 | Offset = Ptr->getOperand(0); |
| 13566 | } else { |
| 13567 | Base = Ptr->getOperand(0); |
| 13568 | Offset = Ptr->getOperand(1); |
| 13569 | } |
| 13570 | return true; |
| 13571 | } |
| 13572 | |
| 13573 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 13574 | Base = Ptr->getOperand(0); |
| 13575 | Offset = Ptr->getOperand(1); |
| 13576 | return true; |
| 13577 | } |
| 13578 | |
| 13579 | // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. |
| 13580 | return false; |
| 13581 | } |
| 13582 | |
| 13583 | static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, |
| 13584 | bool isSEXTLoad, SDValue &Base, |
| 13585 | SDValue &Offset, bool &isInc, |
| 13586 | SelectionDAG &DAG) { |
| 13587 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 13588 | return false; |
| 13589 | |
| 13590 | Base = Ptr->getOperand(0); |
| 13591 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { |
| 13592 | int RHSC = (int)RHS->getZExtValue(); |
| 13593 | if (RHSC < 0 && RHSC > -0x100) { // 8 bits. |
| 13594 | assert(Ptr->getOpcode() == ISD::ADD); |
| 13595 | isInc = false; |
| 13596 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 13597 | return true; |
| 13598 | } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. |
| 13599 | isInc = Ptr->getOpcode() == ISD::ADD; |
| 13600 | Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 13601 | return true; |
| 13602 | } |
| 13603 | } |
| 13604 | |
| 13605 | return false; |
| 13606 | } |
| 13607 | |
| 13608 | /// getPreIndexedAddressParts - returns true by value, base pointer and |
| 13609 | /// offset pointer and addressing mode by reference if the node's address |
| 13610 | /// can be legally represented as pre-indexed load / store address. |
| 13611 | bool |
| 13612 | ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, |
| 13613 | SDValue &Offset, |
| 13614 | ISD::MemIndexedMode &AM, |
| 13615 | SelectionDAG &DAG) const { |
| 13616 | if (Subtarget->isThumb1Only()) |
| 13617 | return false; |
| 13618 | |
| 13619 | EVT VT; |
| 13620 | SDValue Ptr; |
| 13621 | bool isSEXTLoad = false; |
| 13622 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
| 13623 | Ptr = LD->getBasePtr(); |
| 13624 | VT = LD->getMemoryVT(); |
| 13625 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 13626 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { |
| 13627 | Ptr = ST->getBasePtr(); |
| 13628 | VT = ST->getMemoryVT(); |
| 13629 | } else |
| 13630 | return false; |
| 13631 | |
| 13632 | bool isInc; |
| 13633 | bool isLegal = false; |
| 13634 | if (Subtarget->isThumb2()) |
| 13635 | isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, |
| 13636 | Offset, isInc, DAG); |
| 13637 | else |
| 13638 | isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, |
| 13639 | Offset, isInc, DAG); |
| 13640 | if (!isLegal) |
| 13641 | return false; |
| 13642 | |
| 13643 | AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; |
| 13644 | return true; |
| 13645 | } |
| 13646 | |
| 13647 | /// getPostIndexedAddressParts - returns true by value, base pointer and |
| 13648 | /// offset pointer and addressing mode by reference if this node can be |
| 13649 | /// combined with a load / store to form a post-indexed load / store. |
| 13650 | bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, |
| 13651 | SDValue &Base, |
| 13652 | SDValue &Offset, |
| 13653 | ISD::MemIndexedMode &AM, |
| 13654 | SelectionDAG &DAG) const { |
| 13655 | EVT VT; |
| 13656 | SDValue Ptr; |
| 13657 | bool isSEXTLoad = false, isNonExt; |
| 13658 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
| 13659 | VT = LD->getMemoryVT(); |
| 13660 | Ptr = LD->getBasePtr(); |
| 13661 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 13662 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
| 13663 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { |
| 13664 | VT = ST->getMemoryVT(); |
| 13665 | Ptr = ST->getBasePtr(); |
| 13666 | isNonExt = !ST->isTruncatingStore(); |
| 13667 | } else |
| 13668 | return false; |
| 13669 | |
| 13670 | if (Subtarget->isThumb1Only()) { |
| 13671 | // Thumb-1 can do a limited post-inc load or store as an updating LDM. It |
| 13672 | // must be non-extending/truncating, i32, with an offset of 4. |
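|       | // For example, "ldm r0!, {r1}" loads *r0 into r1 and then advances r0 by
|       | // 4, which is exactly a post-incremented i32 load.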
| 13673 | assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
| 13674 | if (Op->getOpcode() != ISD::ADD || !isNonExt) |
| 13675 | return false; |
| 13676 | auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1)); |
| 13677 | if (!RHS || RHS->getZExtValue() != 4) |
| 13678 | return false; |
| 13679 | |
| 13680 | Offset = Op->getOperand(1); |
| 13681 | Base = Op->getOperand(0); |
| 13682 | AM = ISD::POST_INC; |
| 13683 | return true; |
| 13684 | } |
| 13685 | |
| 13686 | bool isInc; |
| 13687 | bool isLegal = false; |
| 13688 | if (Subtarget->isThumb2()) |
| 13689 | isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, |
| 13690 | isInc, DAG); |
| 13691 | else |
| 13692 | isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, |
| 13693 | isInc, DAG); |
| 13694 | if (!isLegal) |
| 13695 | return false; |
| 13696 | |
| 13697 | if (Ptr != Base) { |
| 13698 | // Swap base ptr and offset to catch more post-index load / store when |
| 13699 | // it's legal. In Thumb2 mode, offset must be an immediate. |
| 13700 | if (Ptr == Offset && Op->getOpcode() == ISD::ADD && |
| 13701 | !Subtarget->isThumb2()) |
| 13702 | std::swap(Base, Offset); |
| 13703 | |
| 13704 | // Post-indexed load / store update the base pointer. |
| 13705 | if (Ptr != Base) |
| 13706 | return false; |
| 13707 | } |
| 13708 | |
| 13709 | AM = isInc ? ISD::POST_INC : ISD::POST_DEC; |
| 13710 | return true; |
| 13711 | } |
| 13712 | |
| 13713 | void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, |
| 13714 | KnownBits &Known, |
| 13715 | const APInt &DemandedElts, |
| 13716 | const SelectionDAG &DAG, |
| 13717 | unsigned Depth) const { |
| 13718 | unsigned BitWidth = Known.getBitWidth(); |
| 13719 | Known.resetAll(); |
| 13720 | switch (Op.getOpcode()) { |
| 13721 | default: break; |
| 13722 | case ARMISD::ADDC: |
| 13723 | case ARMISD::ADDE: |
| 13724 | case ARMISD::SUBC: |
| 13725 | case ARMISD::SUBE: |
| 13726 | // Special cases when we convert a carry to a boolean. |
| 13727 | if (Op.getResNo() == 0) { |
| 13728 | SDValue LHS = Op.getOperand(0); |
| 13729 | SDValue RHS = Op.getOperand(1); |
| 13730 | // (ADDE 0, 0, C) will give us a single bit. |
| 13731 | if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) && |
| 13732 | isNullConstant(RHS)) { |
| 13733 | Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); |
| 13734 | return; |
| 13735 | } |
| 13736 | } |
| 13737 | break; |
| 13738 | case ARMISD::CMOV: { |
| 13739 | // Bits are known zero/one if known on the LHS and RHS. |
| 13740 | Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1); |
| 13741 | if (Known.isUnknown()) |
| 13742 | return; |
| 13743 | |
| 13744 | KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1); |
| 13745 | Known.Zero &= KnownRHS.Zero; |
| 13746 | Known.One &= KnownRHS.One; |
| 13747 | return; |
| 13748 | } |
| 13749 | case ISD::INTRINSIC_W_CHAIN: { |
| 13750 | ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1)); |
| 13751 | Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); |
| 13752 | switch (IntID) { |
| 13753 | default: return; |
| 13754 | case Intrinsic::arm_ldaex: |
| 13755 | case Intrinsic::arm_ldrex: { |
| 13756 | EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT(); |
| 13757 | unsigned MemBits = VT.getScalarSizeInBits(); |
| 13758 | Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); |
| 13759 | return; |
| 13760 | } |
| 13761 | } |
| 13762 | } |
| 13763 | case ARMISD::BFI: { |
| 13764 | // Conservatively, we can recurse down the first operand |
| 13765 | // and just mask out all affected bits. |
| 13766 | Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); |
| 13767 | |
| 13768 | // The operand to BFI is already a mask suitable for removing the bits it |
| 13769 | // sets. |
| 13770 | ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2)); |
| 13771 | const APInt &Mask = CI->getAPIntValue(); |
| 13772 | Known.Zero &= Mask; |
| 13773 | Known.One &= Mask; |
| 13774 | return; |
| 13775 | } |
| 13776 | case ARMISD::VGETLANEs: |
| 13777 | case ARMISD::VGETLANEu: { |
| 13778 | const SDValue &SrcSV = Op.getOperand(0); |
| 13779 | EVT VecVT = SrcSV.getValueType(); |
| 13780 | assert(VecVT.isVector() && "VGETLANE expected a vector type");
| 13781 | const unsigned NumSrcElts = VecVT.getVectorNumElements(); |
| 13782 | ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode()); |
| 13783 | assert(Pos->getAPIntValue().ult(NumSrcElts) && |
| 13784 | "VGETLANE index out of bounds" ); |
| 13785 | unsigned Idx = Pos->getZExtValue(); |
| 13786 | APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); |
| 13787 | Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1); |
| 13788 | |
| 13789 | EVT VT = Op.getValueType(); |
| 13790 | const unsigned DstSz = VT.getScalarSizeInBits(); |
| 13791 | const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits(); |
| 13792 | (void)SrcSz; |
| 13793 | assert(SrcSz == Known.getBitWidth()); |
| 13794 | assert(DstSz > SrcSz); |
| 13795 | if (Op.getOpcode() == ARMISD::VGETLANEs) |
| 13796 | Known = Known.sext(DstSz); |
| 13797 | else { |
| 13798 | Known = Known.zext(DstSz, true /* extended bits are known zero */); |
| 13799 | } |
| 13800 | assert(DstSz == Known.getBitWidth()); |
| 13801 | break; |
| 13802 | } |
| 13803 | } |
| 13804 | } |
| 13805 | |
| 13806 | bool |
| 13807 | ARMTargetLowering::targetShrinkDemandedConstant(SDValue Op, |
| 13808 | const APInt &DemandedAPInt, |
| 13809 | TargetLoweringOpt &TLO) const { |
| 13810 | // Delay optimization, so we don't have to deal with illegal types, or block |
| 13811 | // optimizations. |
| 13812 | if (!TLO.LegalOps) |
| 13813 | return false; |
| 13814 | |
| 13815 | // Only optimize AND for now. |
| 13816 | if (Op.getOpcode() != ISD::AND) |
| 13817 | return false; |
| 13818 | |
| 13819 | EVT VT = Op.getValueType(); |
| 13820 | |
| 13821 | // Ignore vectors. |
| 13822 | if (VT.isVector()) |
| 13823 | return false; |
| 13824 | |
  assert(VT == MVT::i32 && "Unexpected integer type");
| 13826 | |
| 13827 | // Make sure the RHS really is a constant. |
| 13828 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); |
| 13829 | if (!C) |
| 13830 | return false; |
| 13831 | |
| 13832 | unsigned Mask = C->getZExtValue(); |
| 13833 | |
| 13834 | unsigned Demanded = DemandedAPInt.getZExtValue(); |
| 13835 | unsigned ShrunkMask = Mask & Demanded; |
| 13836 | unsigned ExpandedMask = Mask | ~Demanded; |
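  // Worked example (illustrative): for (and i32 %x, 0x010000FF) with only the
  // low 24 bits demanded, ShrunkMask == 0xFF and ExpandedMask == 0xFF0000FF,
  // so IsLegalMask(0xFF) succeeds below and the constant is narrowed to 0xFF,
  // a uxtb-style mask.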
| 13837 | |
| 13838 | // If the mask is all zeros, let the target-independent code replace the |
| 13839 | // result with zero. |
| 13840 | if (ShrunkMask == 0) |
| 13841 | return false; |
| 13842 | |
| 13843 | // If the mask is all ones, erase the AND. (Currently, the target-independent |
| 13844 | // code won't do this, so we have to do it explicitly to avoid an infinite |
| 13845 | // loop in obscure cases.) |
| 13846 | if (ExpandedMask == ~0U) |
| 13847 | return TLO.CombineTo(Op, Op.getOperand(0)); |
| 13848 | |
| 13849 | auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool { |
| 13850 | return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0; |
| 13851 | }; |
| 13852 | auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool { |
| 13853 | if (NewMask == Mask) |
| 13854 | return true; |
| 13855 | SDLoc DL(Op); |
| 13856 | SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT); |
| 13857 | SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); |
| 13858 | return TLO.CombineTo(Op, NewOp); |
| 13859 | }; |
| 13860 | |
| 13861 | // Prefer uxtb mask. |
| 13862 | if (IsLegalMask(0xFF)) |
| 13863 | return UseMask(0xFF); |
| 13864 | |
| 13865 | // Prefer uxth mask. |
| 13866 | if (IsLegalMask(0xFFFF)) |
| 13867 | return UseMask(0xFFFF); |
| 13868 | |
| 13869 | // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2. |
| 13870 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 13871 | if (ShrunkMask < 256) |
| 13872 | return UseMask(ShrunkMask); |
| 13873 | |
| 13874 | // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2. |
| 13875 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 13876 | if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256) |
| 13877 | return UseMask(ExpandedMask); |
| 13878 | |
| 13879 | // Potential improvements: |
| 13880 | // |
| 13881 | // We could try to recognize lsls+lsrs or lsrs+lsls pairs here. |
| 13882 | // We could try to prefer Thumb1 immediates which can be lowered to a |
| 13883 | // two-instruction sequence. |
| 13884 | // We could try to recognize more legal ARM/Thumb2 immediates here. |
| 13885 | |
| 13886 | return false; |
| 13887 | } |
| 13888 | |
| 13889 | |
| 13890 | //===----------------------------------------------------------------------===// |
| 13891 | // ARM Inline Assembly Support |
| 13892 | //===----------------------------------------------------------------------===// |
| 13893 | |
| 13894 | bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { |
| 13895 | // Looking for "rev" which is V6+. |
| 13896 | if (!Subtarget->hasV6Ops()) |
| 13897 | return false; |
| 13898 | |
| 13899 | InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); |
| 13900 | std::string AsmStr = IA->getAsmString(); |
| 13901 | SmallVector<StringRef, 4> AsmPieces; |
| 13902 | SplitString(AsmStr, AsmPieces, ";\n" ); |
| 13903 | |
| 13904 | switch (AsmPieces.size()) { |
| 13905 | default: return false; |
| 13906 | case 1: |
| 13907 | AsmStr = AsmPieces[0]; |
| 13908 | AsmPieces.clear(); |
| 13909 | SplitString(AsmStr, AsmPieces, " \t," ); |
| 13910 | |
| 13911 | // rev $0, $1 |
| 13912 | if (AsmPieces.size() == 3 && |
| 13913 | AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && |
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
| 13915 | IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); |
| 13916 | if (Ty && Ty->getBitWidth() == 32) |
| 13917 | return IntrinsicLowering::LowerToByteSwap(CI); |
| 13918 | } |
| 13919 | break; |
| 13920 | } |
| 13921 | |
| 13922 | return false; |
| 13923 | } |
| 13924 | |
| 13925 | const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { |
| 13926 | // At this point, we have to lower this constraint to something else, so we |
| 13927 | // lower it to an "r" or "w". However, by doing this we will force the result |
| 13928 | // to be in register, while the X constraint is much more permissive. |
| 13929 | // |
| 13930 | // Although we are correct (we are free to emit anything, without |
| 13931 | // constraints), we might break use cases that would expect us to be more |
| 13932 | // efficient and emit something else. |
  if (!Subtarget->hasVFP2Base())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "w";
  if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
      (ConstraintVT.getSizeInBits() == 64 ||
       ConstraintVT.getSizeInBits() == 128))
    return "w";

  return "r";
| 13943 | } |
| 13944 | |
| 13945 | /// getConstraintType - Given a constraint letter, return the type of |
| 13946 | /// constraint it is for this target. |
| 13947 | ARMTargetLowering::ConstraintType |
| 13948 | ARMTargetLowering::getConstraintType(StringRef Constraint) const { |
| 13949 | if (Constraint.size() == 1) { |
| 13950 | switch (Constraint[0]) { |
| 13951 | default: break; |
| 13952 | case 'l': return C_RegisterClass; |
| 13953 | case 'w': return C_RegisterClass; |
| 13954 | case 'h': return C_RegisterClass; |
| 13955 | case 'x': return C_RegisterClass; |
| 13956 | case 't': return C_RegisterClass; |
| 13957 | case 'j': return C_Other; // Constant for movw. |
| 13958 | // An address with a single base register. Due to the way we |
| 13959 | // currently handle addresses it is the same as an 'r' memory constraint. |
| 13960 | case 'Q': return C_Memory; |
| 13961 | } |
| 13962 | } else if (Constraint.size() == 2) { |
| 13963 | switch (Constraint[0]) { |
| 13964 | default: break; |
| 13965 | // All 'U+' constraints are addresses. |
| 13966 | case 'U': return C_Memory; |
| 13967 | } |
| 13968 | } |
| 13969 | return TargetLowering::getConstraintType(Constraint); |
| 13970 | } |
| 13971 | |
| 13972 | /// Examine constraint type and operand type and determine a weight value. |
| 13973 | /// This object must already have been set up with the operand type |
| 13974 | /// and the current alternative constraint selected. |
| 13975 | TargetLowering::ConstraintWeight |
| 13976 | ARMTargetLowering::getSingleConstraintMatchWeight( |
| 13977 | AsmOperandInfo &info, const char *constraint) const { |
| 13978 | ConstraintWeight weight = CW_Invalid; |
| 13979 | Value *CallOperandVal = info.CallOperandVal; |
| 13980 | // If we don't have a value, we can't do a match, |
| 13981 | // but allow it at the lowest weight. |
| 13982 | if (!CallOperandVal) |
| 13983 | return CW_Default; |
| 13984 | Type *type = CallOperandVal->getType(); |
| 13985 | // Look at the constraint type. |
| 13986 | switch (*constraint) { |
| 13987 | default: |
| 13988 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); |
| 13989 | break; |
| 13990 | case 'l': |
| 13991 | if (type->isIntegerTy()) { |
| 13992 | if (Subtarget->isThumb()) |
| 13993 | weight = CW_SpecificReg; |
| 13994 | else |
| 13995 | weight = CW_Register; |
| 13996 | } |
| 13997 | break; |
| 13998 | case 'w': |
| 13999 | if (type->isFloatingPointTy()) |
| 14000 | weight = CW_Register; |
| 14001 | break; |
| 14002 | } |
| 14003 | return weight; |
| 14004 | } |
| 14005 | |
| 14006 | using RCPair = std::pair<unsigned, const TargetRegisterClass *>; |
| 14007 | |
| 14008 | RCPair ARMTargetLowering::getRegForInlineAsmConstraint( |
| 14009 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
| 14010 | if (Constraint.size() == 1) { |
| 14011 | // GCC ARM Constraint Letters |
| 14012 | switch (Constraint[0]) { |
| 14013 | case 'l': // Low regs or general regs. |
| 14014 | if (Subtarget->isThumb()) |
| 14015 | return RCPair(0U, &ARM::tGPRRegClass); |
| 14016 | return RCPair(0U, &ARM::GPRRegClass); |
| 14017 | case 'h': // High regs or no regs. |
| 14018 | if (Subtarget->isThumb()) |
| 14019 | return RCPair(0U, &ARM::hGPRRegClass); |
| 14020 | break; |
| 14021 | case 'r': |
| 14022 | if (Subtarget->isThumb1Only()) |
| 14023 | return RCPair(0U, &ARM::tGPRRegClass); |
| 14024 | return RCPair(0U, &ARM::GPRRegClass); |
| 14025 | case 'w': |
| 14026 | if (VT == MVT::Other) |
| 14027 | break; |
| 14028 | if (VT == MVT::f32) |
| 14029 | return RCPair(0U, &ARM::SPRRegClass); |
| 14030 | if (VT.getSizeInBits() == 64) |
| 14031 | return RCPair(0U, &ARM::DPRRegClass); |
| 14032 | if (VT.getSizeInBits() == 128) |
| 14033 | return RCPair(0U, &ARM::QPRRegClass); |
| 14034 | break; |
| 14035 | case 'x': |
| 14036 | if (VT == MVT::Other) |
| 14037 | break; |
| 14038 | if (VT == MVT::f32) |
| 14039 | return RCPair(0U, &ARM::SPR_8RegClass); |
| 14040 | if (VT.getSizeInBits() == 64) |
| 14041 | return RCPair(0U, &ARM::DPR_8RegClass); |
| 14042 | if (VT.getSizeInBits() == 128) |
| 14043 | return RCPair(0U, &ARM::QPR_8RegClass); |
| 14044 | break; |
| 14045 | case 't': |
| 14046 | if (VT == MVT::Other) |
| 14047 | break; |
| 14048 | if (VT == MVT::f32 || VT == MVT::i32) |
| 14049 | return RCPair(0U, &ARM::SPRRegClass); |
| 14050 | if (VT.getSizeInBits() == 64) |
| 14051 | return RCPair(0U, &ARM::DPR_VFP2RegClass); |
| 14052 | if (VT.getSizeInBits() == 128) |
| 14053 | return RCPair(0U, &ARM::QPR_VFP2RegClass); |
| 14054 | break; |
| 14055 | } |
| 14056 | } |
| 14057 | if (StringRef("{cc}" ).equals_lower(Constraint)) |
| 14058 | return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); |
| 14059 | |
| 14060 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
| 14061 | } |
| 14062 | |
| 14063 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops |
| 14064 | /// vector. If it is invalid, don't add anything to Ops. |
| 14065 | void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, |
| 14066 | std::string &Constraint, |
| 14067 | std::vector<SDValue>&Ops, |
| 14068 | SelectionDAG &DAG) const { |
| 14069 | SDValue Result; |
| 14070 | |
| 14071 | // Currently only support length 1 constraints. |
| 14072 | if (Constraint.length() != 1) return; |
| 14073 | |
| 14074 | char ConstraintLetter = Constraint[0]; |
| 14075 | switch (ConstraintLetter) { |
| 14076 | default: break; |
| 14077 | case 'j': |
| 14078 | case 'I': case 'J': case 'K': case 'L': |
| 14079 | case 'M': case 'N': case 'O': |
| 14080 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); |
| 14081 | if (!C) |
| 14082 | return; |
| 14083 | |
| 14084 | int64_t CVal64 = C->getSExtValue(); |
| 14085 | int CVal = (int) CVal64; |
| 14086 | // None of these constraints allow values larger than 32 bits. Check |
| 14087 | // that the value fits in an int. |
| 14088 | if (CVal != CVal64) |
| 14089 | return; |
| 14090 | |
| 14091 | switch (ConstraintLetter) { |
| 14092 | case 'j': |
| 14093 | // Constant suitable for movw, must be between 0 and |
| 14094 | // 65535. |
| 14095 | if (Subtarget->hasV6T2Ops()) |
| 14096 | if (CVal >= 0 && CVal <= 65535) |
| 14097 | break; |
| 14098 | return; |
| 14099 | case 'I': |
| 14100 | if (Subtarget->isThumb1Only()) { |
| 14101 | // This must be a constant between 0 and 255, for ADD |
| 14102 | // immediates. |
| 14103 | if (CVal >= 0 && CVal <= 255) |
| 14104 | break; |
| 14105 | } else if (Subtarget->isThumb2()) { |
| 14106 | // A constant that can be used as an immediate value in a |
| 14107 | // data-processing instruction. |
| 14108 | if (ARM_AM::getT2SOImmVal(CVal) != -1) |
| 14109 | break; |
| 14110 | } else { |
| 14111 | // A constant that can be used as an immediate value in a |
| 14112 | // data-processing instruction. |
| 14113 | if (ARM_AM::getSOImmVal(CVal) != -1) |
| 14114 | break; |
| 14115 | } |
| 14116 | return; |
| 14117 | |
| 14118 | case 'J': |
| 14119 | if (Subtarget->isThumb1Only()) { |
| 14120 | // This must be a constant between -255 and -1, for negated ADD |
| 14121 | // immediates. This can be used in GCC with an "n" modifier that |
| 14122 | // prints the negated value, for use with SUB instructions. It is |
| 14123 | // not useful otherwise but is implemented for compatibility. |
| 14124 | if (CVal >= -255 && CVal <= -1) |
| 14125 | break; |
| 14126 | } else { |
| 14127 | // This must be a constant between -4095 and 4095. It is not clear |
| 14128 | // what this constraint is intended for. Implemented for |
| 14129 | // compatibility with GCC. |
| 14130 | if (CVal >= -4095 && CVal <= 4095) |
| 14131 | break; |
| 14132 | } |
| 14133 | return; |
| 14134 | |
| 14135 | case 'K': |
| 14136 | if (Subtarget->isThumb1Only()) { |
| 14137 | // A 32-bit value where only one byte has a nonzero value. Exclude |
| 14138 | // zero to match GCC. This constraint is used by GCC internally for |
| 14139 | // constants that can be loaded with a move/shift combination. |
| 14140 | // It is not useful otherwise but is implemented for compatibility. |
| 14141 | if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) |
| 14142 | break; |
| 14143 | } else if (Subtarget->isThumb2()) { |
| 14144 | // A constant whose bitwise inverse can be used as an immediate |
| 14145 | // value in a data-processing instruction. This can be used in GCC |
| 14146 | // with a "B" modifier that prints the inverted value, for use with |
| 14147 | // BIC and MVN instructions. It is not useful otherwise but is |
| 14148 | // implemented for compatibility. |
| 14149 | if (ARM_AM::getT2SOImmVal(~CVal) != -1) |
| 14150 | break; |
| 14151 | } else { |
| 14152 | // A constant whose bitwise inverse can be used as an immediate |
| 14153 | // value in a data-processing instruction. This can be used in GCC |
| 14154 | // with a "B" modifier that prints the inverted value, for use with |
| 14155 | // BIC and MVN instructions. It is not useful otherwise but is |
| 14156 | // implemented for compatibility. |
| 14157 | if (ARM_AM::getSOImmVal(~CVal) != -1) |
| 14158 | break; |
| 14159 | } |
| 14160 | return; |
| 14161 | |
| 14162 | case 'L': |
| 14163 | if (Subtarget->isThumb1Only()) { |
| 14164 | // This must be a constant between -7 and 7, |
| 14165 | // for 3-operand ADD/SUB immediate instructions. |
| 14166 | if (CVal >= -7 && CVal < 7) |
| 14167 | break; |
| 14168 | } else if (Subtarget->isThumb2()) { |
| 14169 | // A constant whose negation can be used as an immediate value in a |
| 14170 | // data-processing instruction. This can be used in GCC with an "n" |
| 14171 | // modifier that prints the negated value, for use with SUB |
| 14172 | // instructions. It is not useful otherwise but is implemented for |
| 14173 | // compatibility. |
| 14174 | if (ARM_AM::getT2SOImmVal(-CVal) != -1) |
| 14175 | break; |
| 14176 | } else { |
| 14177 | // A constant whose negation can be used as an immediate value in a |
| 14178 | // data-processing instruction. This can be used in GCC with an "n" |
| 14179 | // modifier that prints the negated value, for use with SUB |
| 14180 | // instructions. It is not useful otherwise but is implemented for |
| 14181 | // compatibility. |
| 14182 | if (ARM_AM::getSOImmVal(-CVal) != -1) |
| 14183 | break; |
| 14184 | } |
| 14185 | return; |
| 14186 | |
| 14187 | case 'M': |
| 14188 | if (Subtarget->isThumb1Only()) { |
| 14189 | // This must be a multiple of 4 between 0 and 1020, for |
| 14190 | // ADD sp + immediate. |
| 14191 | if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) |
| 14192 | break; |
| 14193 | } else { |
| 14194 | // A power of two or a constant between 0 and 32. This is used in |
| 14195 | // GCC for the shift amount on shifted register operands, but it is |
| 14196 | // useful in general for any shift amounts. |
| 14197 | if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) |
| 14198 | break; |
| 14199 | } |
| 14200 | return; |
| 14201 | |
| 14202 | case 'N': |
| 14203 | if (Subtarget->isThumb()) { // FIXME thumb2 |
| 14204 | // This must be a constant between 0 and 31, for shift amounts. |
| 14205 | if (CVal >= 0 && CVal <= 31) |
| 14206 | break; |
| 14207 | } |
| 14208 | return; |
| 14209 | |
| 14210 | case 'O': |
| 14211 | if (Subtarget->isThumb()) { // FIXME thumb2 |
| 14212 | // This must be a multiple of 4 between -508 and 508, for |
| 14213 | // ADD/SUB sp = sp + immediate. |
| 14214 | if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) |
| 14215 | break; |
| 14216 | } |
| 14217 | return; |
| 14218 | } |
| 14219 | Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType()); |
| 14220 | break; |
| 14221 | } |
| 14222 | |
| 14223 | if (Result.getNode()) { |
| 14224 | Ops.push_back(Result); |
| 14225 | return; |
| 14226 | } |
| 14227 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
| 14228 | } |
| 14229 | |
| 14230 | static RTLIB::Libcall getDivRemLibcall( |
| 14231 | const SDNode *N, MVT::SimpleValueType SVT) { |
| 14232 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 14233 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 14234 | "Unhandled Opcode in getDivRemLibcall" ); |
| 14235 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 14236 | N->getOpcode() == ISD::SREM; |
| 14237 | RTLIB::Libcall LC; |
| 14238 | switch (SVT) { |
| 14239 | default: llvm_unreachable("Unexpected request for libcall!" ); |
| 14240 | case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; |
| 14241 | case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; |
| 14242 | case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; |
| 14243 | case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; |
| 14244 | } |
| 14245 | return LC; |
| 14246 | } |
| 14247 | |
| 14248 | static TargetLowering::ArgListTy getDivRemArgList( |
| 14249 | const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { |
| 14250 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 14251 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 14252 | "Unhandled Opcode in getDivRemArgList" ); |
| 14253 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 14254 | N->getOpcode() == ISD::SREM; |
| 14255 | TargetLowering::ArgListTy Args; |
| 14256 | TargetLowering::ArgListEntry Entry; |
| 14257 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 14258 | EVT ArgVT = N->getOperand(i).getValueType(); |
| 14259 | Type *ArgTy = ArgVT.getTypeForEVT(*Context); |
| 14260 | Entry.Node = N->getOperand(i); |
| 14261 | Entry.Ty = ArgTy; |
| 14262 | Entry.IsSExt = isSigned; |
| 14263 | Entry.IsZExt = !isSigned; |
| 14264 | Args.push_back(Entry); |
| 14265 | } |
| 14266 | if (Subtarget->isTargetWindows() && Args.size() >= 2) |
| 14267 | std::swap(Args[0], Args[1]); |
| 14268 | return Args; |
| 14269 | } |
| 14270 | |
| 14271 | SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { |
| 14272 | assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || |
| 14273 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || |
| 14274 | Subtarget->isTargetWindows()) && |
| 14275 | "Register-based DivRem lowering only" ); |
| 14276 | unsigned Opcode = Op->getOpcode(); |
| 14277 | assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && |
| 14278 | "Invalid opcode for Div/Rem lowering" ); |
| 14279 | bool isSigned = (Opcode == ISD::SDIVREM); |
| 14280 | EVT VT = Op->getValueType(0); |
| 14281 | Type *Ty = VT.getTypeForEVT(*DAG.getContext()); |
| 14282 | SDLoc dl(Op); |
| 14283 | |
| 14284 | // If the target has hardware divide, use divide + multiply + subtract: |
| 14285 | // div = a / b |
| 14286 | // rem = a - b * div |
| 14287 | // return {div, rem} |
| 14288 | // This should be lowered into UDIV/SDIV + MLS later on. |
| 14289 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
| 14290 | : Subtarget->hasDivideInARMMode(); |
| 14291 | if (hasDivide && Op->getValueType(0).isSimple() && |
| 14292 | Op->getSimpleValueType(0) == MVT::i32) { |
| 14293 | unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; |
| 14294 | const SDValue Dividend = Op->getOperand(0); |
| 14295 | const SDValue Divisor = Op->getOperand(1); |
| 14296 | SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor); |
| 14297 | SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor); |
| 14298 | SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul); |
| 14299 | |
| 14300 | SDValue Values[2] = {Div, Rem}; |
| 14301 | return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values); |
| 14302 | } |
| 14303 | |
| 14304 | RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(), |
| 14305 | VT.getSimpleVT().SimpleTy); |
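  // On AEABI targets, for example, LC resolves to __aeabi_idivmod or
  // __aeabi_uidivmod for i32, which return the quotient and remainder as a
  // pair; the exact name comes from the libcall tables.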
| 14306 | SDValue InChain = DAG.getEntryNode(); |
| 14307 | |
| 14308 | TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(), |
| 14309 | DAG.getContext(), |
| 14310 | Subtarget); |
| 14311 | |
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));
| 14313 | |
| 14314 | Type *RetTy = StructType::get(Ty, Ty); |
| 14315 | |
| 14316 | if (Subtarget->isTargetWindows()) |
| 14317 | InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain); |
| 14318 | |
| 14319 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 14320 | CLI.setDebugLoc(dl).setChain(InChain) |
| 14321 | .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)) |
| 14322 | .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned); |
| 14323 | |
| 14324 | std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); |
| 14325 | return CallInfo.first; |
| 14326 | } |
| 14327 | |
// Lowers REM using divmod helpers; see RTABI sections 4.2/4.3.
| 14330 | SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { |
| 14331 | // Build return types (div and rem) |
| 14332 | std::vector<Type*> RetTyParams; |
| 14333 | Type *RetTyElement; |
| 14334 | |
| 14335 | switch (N->getValueType(0).getSimpleVT().SimpleTy) { |
| 14336 | default: llvm_unreachable("Unexpected request for libcall!" ); |
| 14337 | case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break; |
| 14338 | case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break; |
| 14339 | case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break; |
| 14340 | case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break; |
| 14341 | } |
| 14342 | |
| 14343 | RetTyParams.push_back(RetTyElement); |
| 14344 | RetTyParams.push_back(RetTyElement); |
| 14345 | ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); |
| 14346 | Type *RetTy = StructType::get(*DAG.getContext(), ret); |
| 14347 | |
| 14348 | RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT(). |
| 14349 | SimpleTy); |
| 14350 | SDValue InChain = DAG.getEntryNode(); |
| 14351 | TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(), |
| 14352 | Subtarget); |
| 14353 | bool isSigned = N->getOpcode() == ISD::SREM; |
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));
| 14355 | |
| 14356 | if (Subtarget->isTargetWindows()) |
| 14357 | InChain = WinDBZCheckDenominator(DAG, N, InChain); |
| 14358 | |
| 14359 | // Lower call |
| 14360 | CallLoweringInfo CLI(DAG); |
| 14361 | CLI.setChain(InChain) |
| 14362 | .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args)) |
| 14363 | .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N)); |
| 14364 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 14365 | |
| 14366 | // Return second (rem) result operand (first contains div) |
| 14367 | SDNode *ResNode = CallResult.first.getNode(); |
  assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
| 14369 | return ResNode->getOperand(1); |
| 14370 | } |
| 14371 | |
| 14372 | SDValue |
| 14373 | ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { |
  assert(Subtarget->isTargetWindows() && "unsupported target platform");
| 14375 | SDLoc DL(Op); |
| 14376 | |
| 14377 | // Get the inputs. |
| 14378 | SDValue Chain = Op.getOperand(0); |
| 14379 | SDValue Size = Op.getOperand(1); |
| 14380 | |
| 14381 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( |
| 14382 | "no-stack-arg-probe" )) { |
| 14383 | unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); |
| 14384 | SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); |
| 14385 | Chain = SP.getValue(1); |
| 14386 | SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size); |
| 14387 | if (Align) |
| 14388 | SP = DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0), |
| 14389 | DAG.getConstant(-(uint64_t)Align, DL, MVT::i32)); |
| 14390 | Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP); |
| 14391 | SDValue Ops[2] = { SP, Chain }; |
| 14392 | return DAG.getMergeValues(Ops, DL); |
| 14393 | } |
| 14394 | |
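  // The Windows stack probe (__chkstk) takes the allocation size in 4-byte
  // words in r4; the WIN__CHKSTK node expands to that call plus the actual SP
  // adjustment, so divide the byte count by four and marshal it into R4.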
| 14395 | SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size, |
| 14396 | DAG.getConstant(2, DL, MVT::i32)); |
| 14397 | |
| 14398 | SDValue Flag; |
| 14399 | Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag); |
| 14400 | Flag = Chain.getValue(1); |
| 14401 | |
| 14402 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
| 14403 | Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag); |
| 14404 | |
| 14405 | SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); |
| 14406 | Chain = NewSP.getValue(1); |
| 14407 | |
| 14408 | SDValue Ops[2] = { NewSP, Chain }; |
| 14409 | return DAG.getMergeValues(Ops, DL); |
| 14410 | } |
| 14411 | |
| 14412 | SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { |
| 14413 | assert(Op.getValueType() == MVT::f64 && !Subtarget->hasFP64() && |
| 14414 | "Unexpected type for custom-lowering FP_EXTEND" ); |
| 14415 | |
| 14416 | RTLIB::Libcall LC; |
| 14417 | LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); |
| 14418 | |
| 14419 | SDValue SrcVal = Op.getOperand(0); |
| 14420 | return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, |
| 14421 | SDLoc(Op)).first; |
| 14422 | } |
| 14423 | |
| 14424 | SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
| 14425 | assert(Op.getOperand(0).getValueType() == MVT::f64 && !Subtarget->hasFP64() && |
| 14426 | "Unexpected type for custom-lowering FP_ROUND" ); |
| 14427 | |
| 14428 | RTLIB::Libcall LC; |
| 14429 | LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); |
| 14430 | |
| 14431 | SDValue SrcVal = Op.getOperand(0); |
| 14432 | return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, |
| 14433 | SDLoc(Op)).first; |
| 14434 | } |
| 14435 | |
| 14436 | void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 14437 | SelectionDAG &DAG) const { |
  assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS.");
| 14439 | MVT HalfT = MVT::i32; |
| 14440 | SDLoc dl(N); |
| 14441 | SDValue Hi, Lo, Tmp; |
| 14442 | |
| 14443 | if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) || |
| 14444 | !isOperationLegalOrCustom(ISD::UADDO, HalfT)) |
    return;
| 14446 | |
| 14447 | unsigned OpTypeBits = HalfT.getScalarSizeInBits(); |
| 14448 | SDVTList VTList = DAG.getVTList(HalfT, MVT::i1); |
| 14449 | |
| 14450 | Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0), |
| 14451 | DAG.getConstant(0, dl, HalfT)); |
| 14452 | Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0), |
| 14453 | DAG.getConstant(1, dl, HalfT)); |
| 14454 | |
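  // Compute abs(x) as (x + sign) ^ sign with sign = x >>s 63, performed on
  // 32-bit halves: UADDO/ADDCARRY form the 64-bit add of the replicated sign
  // word, and the XORs undo the bias when x was negative.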
| 14455 | Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi, |
| 14456 | DAG.getConstant(OpTypeBits - 1, dl, |
| 14457 | getShiftAmountTy(HalfT, DAG.getDataLayout()))); |
| 14458 | Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo); |
| 14459 | Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi, |
| 14460 | SDValue(Lo.getNode(), 1)); |
| 14461 | Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi); |
| 14462 | Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo); |
| 14463 | |
| 14464 | Results.push_back(Lo); |
| 14465 | Results.push_back(Hi); |
| 14466 | } |
| 14467 | |
| 14468 | bool |
| 14469 | ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
| 14470 | // The ARM target isn't yet aware of offsets. |
| 14471 | return false; |
| 14472 | } |
| 14473 | |
| 14474 | bool ARM::isBitFieldInvertedMask(unsigned v) { |
| 14475 | if (v == 0xffffffff) |
| 14476 | return false; |
| 14477 | |
  // There can be 1's on either or both "outsides"; all the "inside" bits
  // must be 0's.
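  // e.g. v = 0xF000000F gives ~v = 0x0FFFFFF0, a single shifted run of ones,
  // so it is a valid inverted bit-field mask; v = 0xFF00FF00 is not.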
| 14480 | return isShiftedMask_32(~v); |
| 14481 | } |
| 14482 | |
| 14483 | /// isFPImmLegal - Returns true if the target can instruction select the |
| 14484 | /// specified FP immediate natively. If false, the legalizer will |
| 14485 | /// materialize the FP immediate as a load from a constant pool. |
| 14486 | bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, |
| 14487 | bool ForCodeSize) const { |
| 14488 | if (!Subtarget->hasVFP3Base()) |
| 14489 | return false; |
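  // For instance (illustrative values), 0.5 and 1.0 fit the 8-bit VFP
  // immediate encoding and can be materialized with a single vmov, while 0.1
  // cannot and will be loaded from the constant pool.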
| 14490 | if (VT == MVT::f16 && Subtarget->hasFullFP16()) |
| 14491 | return ARM_AM::getFP16Imm(Imm) != -1; |
| 14492 | if (VT == MVT::f32) |
| 14493 | return ARM_AM::getFP32Imm(Imm) != -1; |
| 14494 | if (VT == MVT::f64 && Subtarget->hasFP64()) |
| 14495 | return ARM_AM::getFP64Imm(Imm) != -1; |
| 14496 | return false; |
| 14497 | } |
| 14498 | |
| 14499 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as |
| 14500 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment |
| 14501 | /// specified in the intrinsic calls. |
| 14502 | bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, |
| 14503 | const CallInst &I, |
| 14504 | MachineFunction &MF, |
| 14505 | unsigned Intrinsic) const { |
| 14506 | switch (Intrinsic) { |
| 14507 | case Intrinsic::arm_neon_vld1: |
| 14508 | case Intrinsic::arm_neon_vld2: |
| 14509 | case Intrinsic::arm_neon_vld3: |
| 14510 | case Intrinsic::arm_neon_vld4: |
| 14511 | case Intrinsic::arm_neon_vld2lane: |
| 14512 | case Intrinsic::arm_neon_vld3lane: |
| 14513 | case Intrinsic::arm_neon_vld4lane: |
| 14514 | case Intrinsic::arm_neon_vld2dup: |
| 14515 | case Intrinsic::arm_neon_vld3dup: |
| 14516 | case Intrinsic::arm_neon_vld4dup: { |
| 14517 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 14518 | // Conservatively set memVT to the entire set of vectors loaded. |
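    // e.g. a vld2 returning { <4 x i32>, <4 x i32> } loads 256 bits, which is
    // conservatively modelled here as v4i64 (illustrative).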
| 14519 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 14520 | uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; |
| 14521 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
| 14522 | Info.ptrVal = I.getArgOperand(0); |
| 14523 | Info.offset = 0; |
| 14524 | Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); |
| 14525 | Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); |
| 14526 | // volatile loads with NEON intrinsics not supported |
| 14527 | Info.flags = MachineMemOperand::MOLoad; |
| 14528 | return true; |
| 14529 | } |
| 14530 | case Intrinsic::arm_neon_vld1x2: |
| 14531 | case Intrinsic::arm_neon_vld1x3: |
| 14532 | case Intrinsic::arm_neon_vld1x4: { |
| 14533 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 14534 | // Conservatively set memVT to the entire set of vectors loaded. |
| 14535 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 14536 | uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; |
| 14537 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
| 14538 | Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1); |
| 14539 | Info.offset = 0; |
| 14540 | Info.align = 0; |
| 14541 | // volatile loads with NEON intrinsics not supported |
| 14542 | Info.flags = MachineMemOperand::MOLoad; |
| 14543 | return true; |
| 14544 | } |
| 14545 | case Intrinsic::arm_neon_vst1: |
| 14546 | case Intrinsic::arm_neon_vst2: |
| 14547 | case Intrinsic::arm_neon_vst3: |
| 14548 | case Intrinsic::arm_neon_vst4: |
| 14549 | case Intrinsic::arm_neon_vst2lane: |
| 14550 | case Intrinsic::arm_neon_vst3lane: |
| 14551 | case Intrinsic::arm_neon_vst4lane: { |
| 14552 | Info.opc = ISD::INTRINSIC_VOID; |
| 14553 | // Conservatively set memVT to the entire set of vectors stored. |
| 14554 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 14555 | unsigned NumElts = 0; |
| 14556 | for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { |
| 14557 | Type *ArgTy = I.getArgOperand(ArgI)->getType(); |
| 14558 | if (!ArgTy->isVectorTy()) |
| 14559 | break; |
| 14560 | NumElts += DL.getTypeSizeInBits(ArgTy) / 64; |
| 14561 | } |
| 14562 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
| 14563 | Info.ptrVal = I.getArgOperand(0); |
| 14564 | Info.offset = 0; |
| 14565 | Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); |
| 14566 | Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); |
| 14567 | // volatile stores with NEON intrinsics not supported |
| 14568 | Info.flags = MachineMemOperand::MOStore; |
| 14569 | return true; |
| 14570 | } |
| 14571 | case Intrinsic::arm_neon_vst1x2: |
| 14572 | case Intrinsic::arm_neon_vst1x3: |
| 14573 | case Intrinsic::arm_neon_vst1x4: { |
| 14574 | Info.opc = ISD::INTRINSIC_VOID; |
| 14575 | // Conservatively set memVT to the entire set of vectors stored. |
| 14576 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 14577 | unsigned NumElts = 0; |
| 14578 | for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { |
| 14579 | Type *ArgTy = I.getArgOperand(ArgI)->getType(); |
| 14580 | if (!ArgTy->isVectorTy()) |
| 14581 | break; |
| 14582 | NumElts += DL.getTypeSizeInBits(ArgTy) / 64; |
| 14583 | } |
| 14584 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
| 14585 | Info.ptrVal = I.getArgOperand(0); |
| 14586 | Info.offset = 0; |
| 14587 | Info.align = 0; |
| 14588 | // volatile stores with NEON intrinsics not supported |
| 14589 | Info.flags = MachineMemOperand::MOStore; |
| 14590 | return true; |
| 14591 | } |
| 14592 | case Intrinsic::arm_ldaex: |
| 14593 | case Intrinsic::arm_ldrex: { |
| 14594 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 14595 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); |
| 14596 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 14597 | Info.memVT = MVT::getVT(PtrTy->getElementType()); |
| 14598 | Info.ptrVal = I.getArgOperand(0); |
| 14599 | Info.offset = 0; |
| 14600 | Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); |
| 14601 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 14602 | return true; |
| 14603 | } |
| 14604 | case Intrinsic::arm_stlex: |
| 14605 | case Intrinsic::arm_strex: { |
| 14606 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 14607 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); |
| 14608 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 14609 | Info.memVT = MVT::getVT(PtrTy->getElementType()); |
| 14610 | Info.ptrVal = I.getArgOperand(1); |
| 14611 | Info.offset = 0; |
| 14612 | Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); |
| 14613 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 14614 | return true; |
| 14615 | } |
| 14616 | case Intrinsic::arm_stlexd: |
| 14617 | case Intrinsic::arm_strexd: |
| 14618 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 14619 | Info.memVT = MVT::i64; |
| 14620 | Info.ptrVal = I.getArgOperand(2); |
| 14621 | Info.offset = 0; |
| 14622 | Info.align = 8; |
| 14623 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 14624 | return true; |
| 14625 | |
| 14626 | case Intrinsic::arm_ldaexd: |
| 14627 | case Intrinsic::arm_ldrexd: |
| 14628 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 14629 | Info.memVT = MVT::i64; |
| 14630 | Info.ptrVal = I.getArgOperand(0); |
| 14631 | Info.offset = 0; |
| 14632 | Info.align = 8; |
| 14633 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 14634 | return true; |
| 14635 | |
| 14636 | default: |
| 14637 | break; |
| 14638 | } |
| 14639 | |
| 14640 | return false; |
| 14641 | } |
| 14642 | |
| 14643 | /// Returns true if it is beneficial to convert a load of a constant |
| 14644 | /// to just the constant itself. |
| 14645 | bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
| 14646 | Type *Ty) const { |
| 14647 | assert(Ty->isIntegerTy()); |
| 14648 | |
| 14649 | unsigned Bits = Ty->getPrimitiveSizeInBits(); |
| 14650 | if (Bits == 0 || Bits > 32) |
| 14651 | return false; |
| 14652 | return true; |
| 14653 | } |
| 14654 | |
bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const {
| 14657 | if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) |
| 14658 | return false; |
| 14659 | |
| 14660 | return (Index == 0 || Index == ResVT.getVectorNumElements()); |
| 14661 | } |
| 14662 | |
| 14663 | Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder, |
| 14664 | ARM_MB::MemBOpt Domain) const { |
| 14665 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 14666 | |
| 14667 | // First, if the target has no DMB, see what fallback we can use. |
| 14668 | if (!Subtarget->hasDataBarrier()) { |
| 14669 | // Some ARMv6 cpus can support data barriers with an mcr instruction. |
| 14670 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 14671 | // here. |
| 14672 | if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { |
| 14673 | Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr); |
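      // These operands encode "mcr p15, 0, <Rd>, c7, c10, 5", the CP15
      // equivalent of a data memory barrier on ARMv6.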
| 14674 | Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0), |
| 14675 | Builder.getInt32(0), Builder.getInt32(7), |
| 14676 | Builder.getInt32(10), Builder.getInt32(5)}; |
| 14677 | return Builder.CreateCall(MCR, args); |
| 14678 | } else { |
| 14679 | // Instead of using barriers, atomic accesses on these subtargets use |
| 14680 | // libcalls. |
| 14681 | llvm_unreachable("makeDMB on a target so old that it has no barriers" ); |
| 14682 | } |
| 14683 | } else { |
| 14684 | Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb); |
| 14685 | // Only a full system barrier exists in the M-class architectures. |
| 14686 | Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; |
| 14687 | Constant *CDomain = Builder.getInt32(Domain); |
| 14688 | return Builder.CreateCall(DMB, CDomain); |
| 14689 | } |
| 14690 | } |
| 14691 | |
| 14692 | // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html |
| 14693 | Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder, |
| 14694 | Instruction *Inst, |
| 14695 | AtomicOrdering Ord) const { |
| 14696 | switch (Ord) { |
| 14697 | case AtomicOrdering::NotAtomic: |
| 14698 | case AtomicOrdering::Unordered: |
| 14699 | llvm_unreachable("Invalid fence: unordered/non-atomic" ); |
| 14700 | case AtomicOrdering::Monotonic: |
| 14701 | case AtomicOrdering::Acquire: |
| 14702 | return nullptr; // Nothing to do |
| 14703 | case AtomicOrdering::SequentiallyConsistent: |
| 14704 | if (!Inst->hasAtomicStore()) |
| 14705 | return nullptr; // Nothing to do |
| 14706 | LLVM_FALLTHROUGH; |
| 14707 | case AtomicOrdering::Release: |
| 14708 | case AtomicOrdering::AcquireRelease: |
| 14709 | if (Subtarget->preferISHSTBarriers()) |
| 14710 | return makeDMB(Builder, ARM_MB::ISHST); |
| 14711 | // FIXME: add a comment with a link to documentation justifying this. |
| 14712 | else |
| 14713 | return makeDMB(Builder, ARM_MB::ISH); |
| 14714 | } |
| 14715 | llvm_unreachable("Unknown fence ordering in emitLeadingFence" ); |
| 14716 | } |
| 14717 | |
| 14718 | Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder, |
| 14719 | Instruction *Inst, |
| 14720 | AtomicOrdering Ord) const { |
| 14721 | switch (Ord) { |
| 14722 | case AtomicOrdering::NotAtomic: |
| 14723 | case AtomicOrdering::Unordered: |
| 14724 | llvm_unreachable("Invalid fence: unordered/not-atomic" ); |
| 14725 | case AtomicOrdering::Monotonic: |
| 14726 | case AtomicOrdering::Release: |
| 14727 | return nullptr; // Nothing to do |
| 14728 | case AtomicOrdering::Acquire: |
| 14729 | case AtomicOrdering::AcquireRelease: |
| 14730 | case AtomicOrdering::SequentiallyConsistent: |
| 14731 | return makeDMB(Builder, ARM_MB::ISH); |
| 14732 | } |
| 14733 | llvm_unreachable("Unknown fence ordering in emitTrailingFence" ); |
| 14734 | } |
| 14735 | |
// Loads and stores of less than 64 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex-M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
| 14740 | bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { |
| 14741 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); |
| 14742 | return (Size == 64) && !Subtarget->isMClass(); |
| 14743 | } |
| 14744 | |
// Loads and stores of less than 64 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex-M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
| 14749 | // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that |
| 14750 | // guarantee, see DDI0406C ARM architecture reference manual, |
| 14751 | // sections A8.8.72-74 LDRD) |
| 14752 | TargetLowering::AtomicExpansionKind |
| 14753 | ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { |
| 14754 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); |
| 14755 | return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly |
| 14756 | : AtomicExpansionKind::None; |
| 14757 | } |
| 14758 | |
| 14759 | // For the real atomic operations, we have ldrex/strex up to 32 bits, |
| 14760 | // and up to 64 bits on the non-M profiles |
| 14761 | TargetLowering::AtomicExpansionKind |
| 14762 | ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { |
| 14763 | if (AI->isFloatingPointOperation()) |
| 14764 | return AtomicExpansionKind::CmpXChg; |
| 14765 | |
| 14766 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); |
| 14767 | bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps(); |
| 14768 | return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) |
| 14769 | ? AtomicExpansionKind::LLSC |
| 14770 | : AtomicExpansionKind::None; |
| 14771 | } |
| 14772 | |
| 14773 | TargetLowering::AtomicExpansionKind |
| 14774 | ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { |
| 14775 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
| 14776 | // implement cmpxchg without spilling. If the address being exchanged is also |
| 14777 | // on the stack and close enough to the spill slot, this can lead to a |
| 14778 | // situation where the monitor always gets cleared and the atomic operation |
| 14779 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. |
| 14780 | bool HasAtomicCmpXchg = |
| 14781 | !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps(); |
| 14782 | if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg) |
| 14783 | return AtomicExpansionKind::LLSC; |
| 14784 | return AtomicExpansionKind::None; |
| 14785 | } |
| 14786 | |
| 14787 | bool ARMTargetLowering::shouldInsertFencesForAtomic( |
| 14788 | const Instruction *I) const { |
| 14789 | return InsertFencesForAtomic; |
| 14790 | } |
| 14791 | |
| 14792 | // This has so far only been implemented for MachO. |
| 14793 | bool ARMTargetLowering::useLoadStackGuardNode() const { |
| 14794 | return Subtarget->isTargetMachO(); |
| 14795 | } |
| 14796 | |
| 14797 | bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, |
| 14798 | unsigned &Cost) const { |
| 14799 | // If we do not have NEON, vector types are not natively supported. |
| 14800 | if (!Subtarget->hasNEON()) |
| 14801 | return false; |
| 14802 | |
  // Floating-point values and vector values map to the same register file.
  // Therefore, although we could do a store+extract on a vector type, it is
  // better to leave the value as a float, since that gives us more freedom
  // in the addressing modes.
| 14807 | if (VectorTy->isFPOrFPVectorTy()) |
| 14808 | return false; |
| 14809 | |
| 14810 | // If the index is unknown at compile time, this is very expensive to lower |
| 14811 | // and it is not possible to combine the store with the extract. |
| 14812 | if (!isa<ConstantInt>(Idx)) |
| 14813 | return false; |
| 14814 | |
  assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
| 14816 | unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth(); |
| 14817 | // We can do a store + vector extract on any vector that fits perfectly in a D |
| 14818 | // or Q register. |
| 14819 | if (BitWidth == 64 || BitWidth == 128) { |
| 14820 | Cost = 0; |
| 14821 | return true; |
| 14822 | } |
| 14823 | return false; |
| 14824 | } |
| 14825 | |
| 14826 | bool ARMTargetLowering::isCheapToSpeculateCttz() const { |
| 14827 | return Subtarget->hasV6T2Ops(); |
| 14828 | } |
| 14829 | |
| 14830 | bool ARMTargetLowering::isCheapToSpeculateCtlz() const { |
| 14831 | return Subtarget->hasV6T2Ops(); |
| 14832 | } |
| 14833 | |
| 14834 | bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const { |
| 14835 | return !Subtarget->hasMinSize(); |
| 14836 | } |
| 14837 | |
| 14838 | Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, |
| 14839 | AtomicOrdering Ord) const { |
| 14840 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 14841 | Type *ValTy = cast<PointerType>(Addr->getType())->getElementType(); |
| 14842 | bool IsAcquire = isAcquireOrStronger(Ord); |
| 14843 | |
| 14844 | // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd |
| 14845 | // intrinsic must return {i32, i32} and we have to recombine them into a |
| 14846 | // single i64 here. |
| 14847 | if (ValTy->getPrimitiveSizeInBits() == 64) { |
| 14848 | Intrinsic::ID Int = |
| 14849 | IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; |
| 14850 | Function *Ldrex = Intrinsic::getDeclaration(M, Int); |
| 14851 | |
| 14852 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); |
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    if (!Subtarget->isLittle())
      std::swap(Lo, Hi);
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
| 14863 | } |
| 14864 | |
| 14865 | Type *Tys[] = { Addr->getType() }; |
| 14866 | Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex; |
| 14867 | Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys); |
| 14868 | |
| 14869 | return Builder.CreateTruncOrBitCast( |
| 14870 | Builder.CreateCall(Ldrex, Addr), |
| 14871 | cast<PointerType>(Addr->getType())->getElementType()); |
| 14872 | } |
| 14873 | |
| 14874 | void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( |
| 14875 | IRBuilder<> &Builder) const { |
| 14876 | if (!Subtarget->hasV7Ops()) |
| 14877 | return; |
| 14878 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 14879 | Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex)); |
| 14880 | } |
| 14881 | |
| 14882 | Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val, |
| 14883 | Value *Addr, |
| 14884 | AtomicOrdering Ord) const { |
| 14885 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 14886 | bool IsRelease = isReleaseOrStronger(Ord); |
| 14887 | |
| 14888 | // Since the intrinsics must have legal type, the i64 intrinsics take two |
| 14889 | // parameters: "i32, i32". We must marshal Val into the appropriate form |
| 14890 | // before the call. |
| 14891 | if (Val->getType()->getPrimitiveSizeInBits() == 64) { |
| 14892 | Intrinsic::ID Int = |
| 14893 | IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; |
| 14894 | Function *Strex = Intrinsic::getDeclaration(M, Int); |
| 14895 | Type *Int32Ty = Type::getInt32Ty(M->getContext()); |
| 14896 | |
    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
| 14899 | if (!Subtarget->isLittle()) |
| 14900 | std::swap(Lo, Hi); |
| 14901 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); |
| 14902 | return Builder.CreateCall(Strex, {Lo, Hi, Addr}); |
| 14903 | } |
| 14904 | |
| 14905 | Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; |
| 14906 | Type *Tys[] = { Addr->getType() }; |
| 14907 | Function *Strex = Intrinsic::getDeclaration(M, Int, Tys); |
| 14908 | |
| 14909 | return Builder.CreateCall( |
| 14910 | Strex, {Builder.CreateZExtOrBitCast( |
| 14911 | Val, Strex->getFunctionType()->getParamType(0)), |
| 14912 | Addr}); |
| 14913 | } |
| 14914 | |
| 14915 | |
| 14916 | bool ARMTargetLowering::alignLoopsWithOptSize() const { |
| 14917 | return Subtarget->isMClass(); |
| 14918 | } |
| 14919 | |
| 14920 | /// A helper function for determining the number of interleaved accesses we |
| 14921 | /// will generate when lowering accesses of the given type. |
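/// For example (illustrative), a 128-bit vector maps to a single access and a
/// 256-bit vector to two; the expression below is a ceiling division by 128.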
| 14922 | unsigned |
| 14923 | ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy, |
| 14924 | const DataLayout &DL) const { |
| 14925 | return (DL.getTypeSizeInBits(VecTy) + 127) / 128; |
| 14926 | } |
| 14927 | |
| 14928 | bool ARMTargetLowering::isLegalInterleavedAccessType( |
| 14929 | VectorType *VecTy, const DataLayout &DL) const { |
| 14930 | |
| 14931 | unsigned VecSize = DL.getTypeSizeInBits(VecTy); |
| 14932 | unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType()); |
| 14933 | |
| 14934 | // Ensure the vector doesn't have f16 elements. Even though we could do an |
| 14935 | // i16 vldN, we can't hold the f16 vectors and will end up converting via |
| 14936 | // f32. |
| 14937 | if (VecTy->getElementType()->isHalfTy()) |
| 14938 | return false; |
| 14939 | |
| 14940 | // Ensure the number of vector elements is greater than 1. |
| 14941 | if (VecTy->getNumElements() < 2) |
| 14942 | return false; |
| 14943 | |
| 14944 | // Ensure the element type is legal. |
| 14945 | if (ElSize != 8 && ElSize != 16 && ElSize != 32) |
| 14946 | return false; |
| 14947 | |
| 14948 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than |
| 14949 | // 128 will be split into multiple interleaved accesses. |
| 14950 | return VecSize == 64 || VecSize % 128 == 0; |
| 14951 | } |
| 14952 | |
| 14953 | /// Lower an interleaved load into a vldN intrinsic. |
| 14954 | /// |
| 14955 | /// E.g. Lower an interleaved load (Factor = 2): |
| 14956 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 |
| 14957 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements |
| 14958 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements |
| 14959 | /// |
| 14960 | /// Into: |
| 14961 | /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) |
| 14962 | /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0 |
| 14963 | /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1 |
| 14964 | bool ARMTargetLowering::lowerInterleavedLoad( |
| 14965 | LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, |
| 14966 | ArrayRef<unsigned> Indices, unsigned Factor) const { |
| 14967 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && |
| 14968 | "Invalid interleave factor" ); |
| 14969 | assert(!Shuffles.empty() && "Empty shufflevector input" ); |
| 14970 | assert(Shuffles.size() == Indices.size() && |
| 14971 | "Unmatched number of shufflevectors and indices" ); |
| 14972 | |
| 14973 | VectorType *VecTy = Shuffles[0]->getType(); |
| 14974 | Type *EltTy = VecTy->getVectorElementType(); |
| 14975 | |
| 14976 | const DataLayout &DL = LI->getModule()->getDataLayout(); |
| 14977 | |
| 14978 | // Skip if we do not have NEON and skip illegal vector types. We can |
| 14979 | // "legalize" wide vector types into multiple interleaved accesses as long as |
| 14980 | // the vector types are divisible by 128. |
| 14981 | if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL)) |
| 14982 | return false; |
| 14983 | |
| 14984 | unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); |
| 14985 | |
| 14986 | // A pointer vector can not be the return type of the ldN intrinsics. Need to |
| 14987 | // load integer vectors first and then convert to pointer vectors. |
| 14988 | if (EltTy->isPointerTy()) |
| 14989 | VecTy = |
| 14990 | VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements()); |
| 14991 | |
| 14992 | IRBuilder<> Builder(LI); |
| 14993 | |
| 14994 | // The base address of the load. |
| 14995 | Value *BaseAddr = LI->getPointerOperand(); |
| 14996 | |
| 14997 | if (NumLoads > 1) { |
| 14998 | // If we're going to generate more than one load, reset the sub-vector type |
| 14999 | // to something legal. |
| 15000 | VecTy = VectorType::get(VecTy->getVectorElementType(), |
| 15001 | VecTy->getVectorNumElements() / NumLoads); |
| 15002 | |
| 15003 | // We will compute the pointer operand of each load from the original base |
| 15004 | // address using GEPs. Cast the base address to a pointer to the scalar |
| 15005 | // element type. |
| 15006 | BaseAddr = Builder.CreateBitCast( |
| 15007 | BaseAddr, VecTy->getVectorElementType()->getPointerTo( |
| 15008 | LI->getPointerAddressSpace())); |
| 15009 | } |
| 15010 | |
  assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
| 15012 | |
| 15013 | Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace()); |
| 15014 | Type *Tys[] = {VecTy, Int8Ptr}; |
  static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                            Intrinsic::arm_neon_vld3,
                                            Intrinsic::arm_neon_vld4};
  Function *VldnFunc =
      Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);

  // Holds sub-vectors extracted from the load intrinsic return values. The
  // sub-vectors are associated with the shufflevector instructions they will
  // replace.
  DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;

  for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
    // If we're generating more than one load, compute the base address of
    // subsequent loads as an offset from the previous.
    if (LoadCount > 0)
      BaseAddr =
          Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr,
                                     VecTy->getVectorNumElements() * Factor);

    SmallVector<Value *, 2> Ops;
    Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
    Ops.push_back(Builder.getInt32(LI->getAlignment()));

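    // Emit the vldN call. As in the example above, the operands are the i8*
    // base address followed by the alignment of the original load.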
    CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN");

    // Replace uses of each shufflevector with the corresponding vector loaded
    // by ldN.
    for (unsigned i = 0; i < Shuffles.size(); i++) {
      ShuffleVectorInst *SV = Shuffles[i];
      unsigned Index = Indices[i];

      Value *SubVec = Builder.CreateExtractValue(VldN, Index);

      // Convert the integer vector to a pointer vector if the element type is
      // a pointer.
      if (EltTy->isPointerTy())
        SubVec = Builder.CreateIntToPtr(
            SubVec, VectorType::get(SV->getType()->getVectorElementType(),
                                    VecTy->getVectorNumElements()));

      SubVecs[SV].push_back(SubVec);
    }
  }

  // Replace uses of the shufflevector instructions with the sub-vectors
  // returned by the load intrinsic. If a shufflevector instruction is
  // associated with more than one sub-vector, those sub-vectors will be
  // concatenated into a single wide vector.
  for (ShuffleVectorInst *SVI : Shuffles) {
    auto &SubVec = SubVecs[SVI];
    auto *WideVec =
        SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
    SVI->replaceAllUsesWith(WideVec);
  }

  return true;
}

/// Lower an interleaved store into a vstN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
/// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///          <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
/// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
///
/// Into:
/// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> %v1, <0, 1, 2, 3>
/// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> %v1, <4, 5, 6, 7>
/// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> %v1, <8, 9, 10, 11>
/// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vst3 instruction in CodeGen.
///
/// Example for a more general valid mask (Factor 3). Lower:
/// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
///          <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
/// store <12 x i32> %i.vec, <12 x i32>* %ptr
///
/// Into:
/// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> %v1, <4, 5, 6, 7>
/// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> %v1, <32, 33, 34, 35>
/// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> %v1, <16, 17, 18, 19>
/// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

  VectorType *VecTy = SVI->getType();
  assert(VecTy->getVectorNumElements() % Factor == 0 &&
         "Invalid interleaved store");

  unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
  Type *EltTy = VecTy->getVectorElementType();
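  // A sub-vector holds a single de-interleaved lane, e.g. <4 x i32> for a
  // <12 x i32> store with Factor = 3.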
  VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Skip if we do not have NEON, and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long
  // as the sub-vector's bit width is divisible by 128.
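  // E.g. a <16 x i32> lane (512 bits) can be legalized into four 128-bit
  // <4 x i32> stores.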
  if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL))
    return false;

  unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);

  Value *Op0 = SVI->getOperand(0);
  Value *Op1 = SVI->getOperand(1);
  IRBuilder<> Builder(SI);

  // StN intrinsics don't support pointer vectors as arguments. Convert pointer
  // vectors to integer vectors.
  if (EltTy->isPointerTy()) {
    Type *IntTy = DL.getIntPtrType(EltTy);

    // Convert to the corresponding integer vector.
    Type *IntVecTy =
        VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);

    SubVecTy = VectorType::get(IntTy, LaneLen);
  }

  // The base address of the store.
  Value *BaseAddr = SI->getPointerOperand();

  if (NumStores > 1) {
    // If we're going to generate more than one store, reset the lane length
    // and sub-vector type to something legal.
    LaneLen /= NumStores;
    SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);

    // We will compute the pointer operand of each store from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
                      SI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");

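  // In a canonical interleaved mask, the element stored to lane j of
  // sub-vector i comes from mask position j * Factor + i; the loop below
  // relies on this layout to recover each sub-vector's start index.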
  auto Mask = SVI->getShuffleMask();

  Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
  Type *Tys[] = {Int8Ptr, SubVecTy};
  static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                             Intrinsic::arm_neon_vst3,
                                             Intrinsic::arm_neon_vst4};

  for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
    // If we're generating more than one store, compute the base address of
    // subsequent stores as an offset from the previous one.
    if (StoreCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(),
                                            BaseAddr, LaneLen * Factor);

    SmallVector<Value *, 6> Ops;
    Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));

    Function *VstNFunc =
        Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys);

    // Split the shufflevector operands into sub-vectors for the new vstN call.
    for (unsigned i = 0; i < Factor; i++) {
      unsigned IdxI = StoreCount * LaneLen * Factor + i;
      if (Mask[IdxI] >= 0) {
        Ops.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0)));
      } else {
        unsigned StartMask = 0;
        for (unsigned j = 1; j < LaneLen; j++) {
          unsigned IdxJ = StoreCount * LaneLen * Factor + j;
          if (Mask[IdxJ * Factor + IdxI] >= 0) {
            StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
            break;
          }
        }
        // Note: Filling undef gaps with arbitrary elements is OK, since those
        // elements were already being written anyway (with undefs). If every
        // element in a chunk is undef, StartMask stays 0 and we default to
        // using elements from index 0. StartMask cannot be negative; that is
        // checked in isReInterleaveMask.
        Ops.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0)));
      }
    }

    Ops.push_back(Builder.getInt32(SI->getAlignment()));
    Builder.CreateCall(VstNFunc, Ops);
  }
  return true;
}

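// Base element categories used to classify homogeneous aggregates (HAs) for
// the AAPCS-VFP calling convention.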
enum HABaseType {
  HA_UNKNOWN = 0,
  HA_FLOAT,
  HA_DOUBLE,
  HA_VECT64,
  HA_VECT128
};

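/// Return true if \p Ty is a homogeneous aggregate: up to four members of a
/// single base type (float, double, or a 64-/128-bit vector), possibly nested
/// in structs and arrays. E.g. struct { float x[2]; float y; } is an HA of
/// three floats.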
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  } else if (Ty->isFloatTy()) {
    if (Base != HA_UNKNOWN && Base != HA_FLOAT)
      return false;
    Members = 1;
    Base = HA_FLOAT;
  } else if (Ty->isDoubleTy()) {
    if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
      return false;
    Members = 1;
    Base = HA_DOUBLE;
  } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
    Members = 1;
    switch (Base) {
    case HA_FLOAT:
    case HA_DOUBLE:
      return false;
    case HA_VECT64:
      return VT->getBitWidth() == 64;
    case HA_VECT128:
      return VT->getBitWidth() == 128;
    case HA_UNKNOWN:
      switch (VT->getBitWidth()) {
      case 64:
        Base = HA_VECT64;
        return true;
      case 128:
        Base = HA_VECT128;
        return true;
      default:
        return false;
      }
    }
  }

  return (Members > 0 && Members <= 4);
}

/// Return the correct alignment for the current calling convention.
unsigned
ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy,
                                                 DataLayout DL) const {
  if (!ArgTy->isVectorTy())
    return DL.getABITypeAlignment(ArgTy);

  // Avoid over-aligning vector parameters. It would require realigning the
  // stack and waste space for no real benefit.
  return std::min(DL.getABITypeAlignment(ArgTy), DL.getStackAlignment());
}

/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
/// passing according to AAPCS rules.
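/// E.g. an HA of three floats is passed in three consecutive S registers
/// under AAPCS-VFP and must not be split between registers and the stack.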
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;

  HABaseType Base = HA_UNKNOWN;
  uint64_t Members = 0;
  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
  LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());

  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
  return IsHA || IsIntArray;
}

unsigned ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
}

unsigned ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1;
}

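// Split-CSR lowering (used e.g. for the CXX_FAST_TLS calling convention):
// instead of spilling callee-saved registers in the prologue and epilogue,
// they are copied into virtual registers in the entry block and copied back
// just before each return; see insertCopiesSplitCSR below.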
void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in ARMFunctionInfo.
  ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create a copy from the CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions; it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

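// Computing the maximum call frame size here, at the end of ISel, makes it
// available to later ARM frame-lowering decisions (e.g. whether the call
// frame can be reserved).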
void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
  MF.getFrameInfo().computeMaxCallFrameSize(MF);
  TargetLoweringBase::finalizeLowering(MF);
}
